prompt (string, 1.74k–34.3k chars) | ref (string, 4–432 chars)
---|---|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: DLYuanGod/TinyGPT-V
# Path: minigpt4/common/registry.py
class Registry:
def register_builder(cls, name):
def wrap(builder_cls):
def register_task(cls, name):
def wrap(task_cls):
def register_model(cls, name):
def wrap(model_cls):
def register_processor(cls, name):
def wrap(processor_cls):
def register_lr_scheduler(cls, name):
def wrap(lr_sched_cls):
def register_runner(cls, name):
def wrap(runner_cls):
def register_path(cls, name, path):
def register(cls, name, obj):
def get_builder_class(cls, name):
def get_model_class(cls, name):
def get_task_class(cls, name):
def get_processor_class(cls, name):
def get_lr_scheduler_class(cls, name):
def get_runner_class(cls, name):
def list_runners(cls):
def list_models(cls):
def list_tasks(cls):
def list_processors(cls):
def list_lr_schedulers(cls):
def list_datasets(cls):
def get_path(cls, name):
def get(cls, name, default=None, no_warning=False):
def unregister(cls, name):
# Path: minigpt4/processors/base_processor.py
class BaseProcessor:
def __init__(self):
self.transform = lambda x: x
return
def __call__(self, item):
return self.transform(item)
@classmethod
def from_config(cls, cfg=None):
return cls()
def build(self, **kwargs):
cfg = OmegaConf.create(kwargs)
return self.from_config(cfg)
# Path: minigpt4/processors/randaugment.py
class RandomAugment(object):
def __init__(self, N=2, M=10, isPIL=False, augs=[]):
self.N = N
self.M = M
self.isPIL = isPIL
if augs:
self.augs = augs
else:
self.augs = list(arg_dict.keys())
def get_random_ops(self):
sampled_ops = np.random.choice(self.augs, self.N)
return [(op, 0.5, self.M) for op in sampled_ops]
def __call__(self, img):
if self.isPIL:
img = np.array(img)
ops = self.get_random_ops()
for name, prob, level in ops:
if np.random.random() > prob:
continue
args = arg_dict[name](level)
img = func_dict[name](img, *args)
return img
# Path: minigpt4/processors/blip_processors.py
import re
from minigpt4.common.registry import registry
from minigpt4.processors.base_processor import BaseProcessor
from minigpt4.processors.randaugment import RandomAugment
from omegaconf import OmegaConf
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
| class BlipImageBaseProcessor(BaseProcessor): |
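For orientation, a minimal usage sketch of the RandomAugment class excerpted above, composed into a torchvision pipeline the way BLIP-style train processors typically use it. The op names ("Identity", "AutoContrast", "Brightness") are assumed to be keys of the module's arg_dict/func_dict tables; they are not confirmed by the excerpt.
from PIL import Image
from torchvision import transforms
from minigpt4.processors.randaugment import RandomAugment

transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    # isPIL=True makes RandomAugment convert the PIL image to a numpy array first
    RandomAugment(N=2, M=5, isPIL=True, augs=["Identity", "AutoContrast", "Brightness"]),
    transforms.ToTensor(),  # accepts the (H, W, C) numpy array RandomAugment returns
])
pixel_values = transform(Image.new("RGB", (256, 256)))  # -> float tensor, shape (3, 224, 224)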
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: jianchang512/vocal-separate
# Path: vocal/cfg.py
LANG = "en" if locale.getdefaultlocale()[0].split('_')[0].lower() != 'zh' else "zh"
ROOT_DIR = os.getcwd()
MODEL_DIR = os.path.join(ROOT_DIR, 'pretrained_models')
STATIC_DIR = os.path.join(ROOT_DIR, 'static')
TMP_DIR = os.path.join(STATIC_DIR, 'tmp')
FILES_DIR = os.path.join(STATIC_DIR, 'files')
# Path: vocal/tool.py
def runffmpeg(arg):
def checkupdate():
def openweb(web_address):
# Path: vocal/cfg.py
ROOT_DIR = os.getcwd()
# Path: start.py
import logging
import threading
import sys
import os
import subprocess
from flask import Flask, request, render_template, jsonify, send_from_directory
from gevent.pywsgi import WSGIServer, WSGIHandler,LoggingLogAdapter
from logging.handlers import RotatingFileHandler
from vocal import cfg, tool
from vocal.cfg import ROOT_DIR
from spleeter.separator import Separator
class CustomRequestHandler(WSGIHandler):
def log_request(self):
pass
# Disable Werkzeug's default log handlers
log = logging.getLogger('werkzeug')
log.handlers[:] = []
log.setLevel(logging.WARNING)
app = Flask(__name__, static_folder=os.path.join(ROOT_DIR, 'static'), static_url_path='/static',
template_folder=os.path.join(ROOT_DIR, 'templates'))
root_log = logging.getLogger()  # Flask's root logger
root_log.handlers = []
root_log.setLevel(logging.WARNING)
# Configure logging
app.logger.setLevel(logging.WARNING)  # set the log level to WARNING
# Create a RotatingFileHandler, setting the log file path and size limit
file_handler = RotatingFileHandler(os.path.join(ROOT_DIR, 'vocal.log'), maxBytes=1024 * 1024, backupCount=5)
# Define the log format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Set the file handler's level and format
file_handler.setLevel(logging.WARNING)
file_handler.setFormatter(formatter)
# Attach the file handler to the logger
app.logger.addHandler(file_handler)
@app.route('/static/<path:filename>')
def static_files(filename):
return send_from_directory(app.config['STATIC_FOLDER'], filename)
@app.route('/')
def index():
return render_template("index.html",cuda=cfg.cuda, language=cfg.LANG,root_dir=ROOT_DIR.replace('\\', '/'))
# Upload audio
@app.route('/upload', methods=['POST'])
def upload():
try:
# Get the uploaded file
audio_file = request.files['audio']
# If it is an mp4
noextname, ext = os.path.splitext(audio_file.filename)
ext = ext.lower()
# If it is a video, extract the audio first
wav_file = os.path.join(cfg.TMP_DIR, f'{noextname}.wav')
if os.path.exists(wav_file) and os.path.getsize(wav_file) > 0:
return jsonify({'code': 0, 'msg': cfg.transobj['lang1'], "data": os.path.basename(wav_file)})
msg=""
if ext in ['.mp4', '.mov', '.avi', '.mkv', '.mpeg', '.mp3', '.flac']:
video_file = os.path.join(cfg.TMP_DIR, f'{noextname}{ext}')
audio_file.save(video_file)
params = [
"-i",
video_file,
]
if ext not in ['.mp3', '.flac']:
params.append('-vn')
params.append(wav_file)
| rs = tool.runffmpeg(params) |
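The params list built above corresponds to an ffmpeg invocation of the shape ffmpeg -i input.mp4 -vn output.wav, where -i names the input, -vn drops the video stream, and the trailing positional argument is the output file. A standalone sketch, assuming ffmpeg is on PATH (tool.runffmpeg presumably wraps something similar):
import subprocess

def extract_audio(video_path: str, wav_path: str) -> None:
    # -y overwrites an existing output; -vn disables video so only audio is transcoded
    subprocess.run(["ffmpeg", "-y", "-i", video_path, "-vn", wav_path], check=True)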
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ali-vilab/dreamtalk
# Path: core/networks/transformer.py
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
# Path: core/networks/transformer.py
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
# Path: core/networks/dynamic_linear.py
class DynamicLinear(nn.Module):
def __init__(self, in_planes, out_planes, cond_planes, bias=True, K=4, temperature=30, ratio=4, init_weight=True):
super().__init__()
self.dynamic_conv = DynamicConv(
in_planes,
out_planes,
cond_planes,
kernel_size=1,
stride=1,
padding=0,
bias=bias,
K=K,
ratio=ratio,
temperature=temperature,
init_weight=init_weight,
)
def forward(self, x, cond):
"""
Args:
x (_type_): (L, B, C_in)
cond (_type_): (B, C_style)
Returns:
_type_: (L, B, C_out)
"""
x = x.permute(1, 2, 0).unsqueeze(-1)
out = self.dynamic_conv(x, cond)
# (B, C_out, L, 1)
out = out.squeeze().permute(2, 0, 1)
return out
# Path: core/networks/dynamic_fc_decoder.py
import torch.nn as nn
import torch
from core.networks.transformer import _get_activation_fn, _get_clones
from core.networks.dynamic_linear import DynamicLinear
class DynamicFCDecoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
d_style,
dynamic_K,
dynamic_ratio,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
# self.linear1 = nn.Linear(d_model, dim_feedforward)
self.linear1 = DynamicLinear(d_model, dim_feedforward, d_style, K=dynamic_K, ratio=dynamic_ratio)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
# self.linear2 = DynamicLinear(dim_feedforward, d_model, d_style, K=dynamic_K, ratio=dynamic_ratio)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos):
return tensor if pos is None else tensor + pos
def forward_post(
self,
tgt,
memory,
style,
tgt_mask=None,
memory_mask=None,
tgt_key_padding_mask=None,
memory_key_padding_mask=None,
pos=None,
query_pos=None,
):
# q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(tgt, tgt, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(
query=tgt, key=memory, value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask
)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
# tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt, style))), style)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt, style))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
# def forward_pre(
# self,
# tgt,
# memory,
# tgt_mask=None,
# memory_mask=None,
# tgt_key_padding_mask=None,
# memory_key_padding_mask=None,
# pos=None,
# query_pos=None,
# ):
# tgt2 = self.norm1(tgt)
# # q = k = self.with_pos_embed(tgt2, query_pos)
# tgt2 = self.self_attn(tgt2, tgt2, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
# tgt = tgt + self.dropout1(tgt2)
# tgt2 = self.norm2(tgt)
# tgt2 = self.multihead_attn(
# query=tgt2, key=memory, value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask
# )[0]
# tgt = tgt + self.dropout2(tgt2)
# tgt2 = self.norm3(tgt)
# tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
# tgt = tgt + self.dropout3(tgt2)
# return tgt
def forward(
self,
tgt,
memory,
style,
tgt_mask=None,
memory_mask=None,
tgt_key_padding_mask=None,
memory_key_padding_mask=None,
pos=None,
query_pos=None,
):
if self.normalize_before:
raise NotImplementedError
# return self.forward_pre(
# tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos
# )
return self.forward_post(
tgt, memory, style, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos
)
class DynamicFCDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
| self.layers = _get_clones(decoder_layer, num_layers) |
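A sketch of the forward pass such a cloned-layer decoder typically runs, following the DETR convention this file borrows from (the exact body is an assumption, not shown in the prompt; torch is imported by the file above):
def forward(self, tgt, memory, style, **kwargs):
    output = tgt
    intermediate = []
    for layer in self.layers:  # the _get_clones(...) ModuleList
        output = layer(output, memory, style, **kwargs)
        if self.return_intermediate:
            intermediate.append(self.norm(output) if self.norm is not None else output)
    if self.norm is not None:
        output = self.norm(output)
    if self.return_intermediate:
        return torch.stack(intermediate)
    return output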
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: jiawei-ren/dreamgaussian4d
# Path: diffusers/src/diffusers/utils/constants.py
USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
# Path: diffusers/src/diffusers/models/lora.py
class LoRACompatibleLinear(nn.Linear):
"""
A Linear layer that can be used with LoRA.
"""
def __init__(self, *args, lora_layer: Optional[LoRALinearLayer] = None, **kwargs):
super().__init__(*args, **kwargs)
self.lora_layer = lora_layer
def set_lora_layer(self, lora_layer: Optional[LoRALinearLayer]):
self.lora_layer = lora_layer
def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False):
if self.lora_layer is None:
return
dtype, device = self.weight.data.dtype, self.weight.data.device
w_orig = self.weight.data.float()
w_up = self.lora_layer.up.weight.data.float()
w_down = self.lora_layer.down.weight.data.float()
if self.lora_layer.network_alpha is not None:
w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank
fused_weight = w_orig + (lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])
if safe_fusing and torch.isnan(fused_weight).any().item():
raise ValueError(
"This LoRA weight seems to be broken. "
f"Encountered NaN values when trying to fuse LoRA weights for {self}."
"LoRA weights will not be fused."
)
self.weight.data = fused_weight.to(device=device, dtype=dtype)
# we can drop the lora layer now
self.lora_layer = None
# offload the up and down matrices to CPU to not blow the memory
self.w_up = w_up.cpu()
self.w_down = w_down.cpu()
self._lora_scale = lora_scale
def _unfuse_lora(self):
if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None):
return
fused_weight = self.weight.data
dtype, device = fused_weight.dtype, fused_weight.device
w_up = self.w_up.to(device=device).float()
w_down = self.w_down.to(device).float()
unfused_weight = fused_weight.float() - (self._lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])
self.weight.data = unfused_weight.to(device=device, dtype=dtype)
self.w_up = None
self.w_down = None
def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
if self.lora_layer is None:
out = super().forward(hidden_states)
return out
else:
out = super().forward(hidden_states) + (scale * self.lora_layer(hidden_states))
return out
# Path: diffusers/src/diffusers/models/activations.py
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import USE_PEFT_BACKEND
from .lora import LoRACompatibleLinear
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ACTIVATION_FUNCTIONS = {
"swish": nn.SiLU(),
"silu": nn.SiLU(),
"mish": nn.Mish(),
"gelu": nn.GELU(),
"relu": nn.ReLU(),
}
def get_activation(act_fn: str) -> nn.Module:
"""Helper function to get activation function from string.
Args:
act_fn (str): Name of activation function.
Returns:
nn.Module: Activation function.
"""
act_fn = act_fn.lower()
if act_fn in ACTIVATION_FUNCTIONS:
return ACTIVATION_FUNCTIONS[act_fn]
else:
raise ValueError(f"Unsupported activation function: {act_fn}")
class GELU(nn.Module):
r"""
GELU activation function with tanh approximation support with `approximate="tanh"`.
Parameters:
dim_in (`int`): The number of channels in the input.
dim_out (`int`): The number of channels in the output.
approximate (`str`, *optional*, defaults to `"none"`): If `"tanh"`, use tanh approximation.
"""
def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out)
self.approximate = approximate
def gelu(self, gate: torch.Tensor) -> torch.Tensor:
if gate.device.type != "mps":
return F.gelu(gate, approximate=self.approximate)
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)
def forward(self, hidden_states):
hidden_states = self.proj(hidden_states)
hidden_states = self.gelu(hidden_states)
return hidden_states
class GEGLU(nn.Module):
r"""
A [variant](https://arxiv.org/abs/2002.05202) of the gated linear unit activation function.
Parameters:
dim_in (`int`): The number of channels in the input.
dim_out (`int`): The number of channels in the output.
"""
def __init__(self, dim_in: int, dim_out: int):
super().__init__()
| linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear |
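A self-contained sketch of the GEGLU computation the class above implements: a single projection to 2 * dim_out that is chunked into hidden states and a gate. This is the canonical pattern; treat the exact diffusers lines as an assumption.
import torch
import torch.nn.functional as F
from torch import nn

class MiniGEGLU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)  # stands in for linear_cls above

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)  # split into value and gate halves
        return x * F.gelu(gate)

out = MiniGEGLU(8, 16)(torch.randn(2, 8))  # -> shape (2, 16)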
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Meituan-AutoML/MobileVLM
# Path: mobilevlm/model/vision_encoder.py
def build_vision_tower(model_cfg, **kwargs):
vision_tower = getattr(model_cfg, 'mm_vision_tower', getattr(model_cfg, 'vision_tower', None))
is_absolute_path_exists = os.path.exists(vision_tower)
if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion"):
vision_tower_type = getattr(model_cfg, 'vision_tower_type', None)
if vision_tower_type == "clip":
return CLIPVisionTower(vision_tower, args=model_cfg, **kwargs)
raise ValueError(f'Unknown vision tower: {vision_tower}')
# Path: mobilevlm/model/vision_projector.py
def build_vision_projector(config, delay_load=False, **kwargs):
projector_type = getattr(config, 'mm_projector_type', 'linear')
if projector_type == 'linear':
return nn.Linear(config.mm_hidden_size, config.hidden_size)
elif projector_type.startswith('mlp'):
mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type)
if mlp_gelu_match:
mlp_depth = int(mlp_gelu_match.group(1))
modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]
for _ in range(1, mlp_depth):
modules.append(nn.GELU())
modules.append(nn.Linear(config.hidden_size, config.hidden_size))
return nn.Sequential(*modules)
elif projector_type.startswith('ldpnet'):
return LDPNetProjector(config)
raise ValueError(f'Unknown projector type: {projector_type}')
# Path: mobilevlm/constants.py
IGNORE_INDEX = -100
# Path: mobilevlm/constants.py
IMAGE_TOKEN_INDEX = -200
# Path: mobilevlm/constants.py
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
# Path: mobilevlm/constants.py
DEFAULT_IM_START_TOKEN = "<im_start>"
# Path: mobilevlm/constants.py
DEFAULT_IM_END_TOKEN = "<im_end>"
# Path: mobilevlm/model/mobilevlm.py
import torch
import torch.nn as nn
from abc import ABC, abstractmethod
from transformers import AutoTokenizer, BitsAndBytesConfig
from mobilevlm.model.vision_encoder import build_vision_tower
from mobilevlm.model.vision_projector import build_vision_projector
from mobilevlm.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, \
DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from mobilevlm.model.mobilellama import MobileLlamaForCausalLM
class MobileVLMMetaModel:
def __init__(self, config):
super(MobileVLMMetaModel, self).__init__(config)
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=False)
self.mm_projector = build_vision_projector(config)
def get_vision_tower(self):
vision_tower = getattr(self, 'vision_tower', None)
if type(vision_tower) is list:
vision_tower = vision_tower[0]
return vision_tower
def initialize_vision_modules(self, model_args, fsdp=None):
mm_vision_select_layer = model_args.mm_vision_select_layer
mm_vision_select_feature = model_args.mm_vision_select_feature
pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
self.config.mm_vision_tower = model_args.vision_tower
self.config.use_mm_proj = True
self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
self.config.mm_vision_select_layer = mm_vision_select_layer
self.config.mm_vision_select_feature = mm_vision_select_feature
# Build VisionTower
vision_tower = build_vision_tower(model_args)
if fsdp is not None and len(fsdp) > 0:
self.vision_tower = [vision_tower]
else:
self.vision_tower = vision_tower
self.config.mm_hidden_size = vision_tower.hidden_size
# Build Vision-Projector
self.mm_projector = build_vision_projector(self.config)
if pretrain_mm_mlp_adapter is not None:
mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
def get_w(weights, keyword):
return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}
self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
class MobileVLMMetaForCausalLM(ABC):
@abstractmethod
def get_model(self):
pass
def get_vision_tower(self):
return self.get_model().get_vision_tower()
def encode_images(self, images):
image_features = self.get_model().get_vision_tower()(images)
image_features = self.get_model().mm_projector(image_features)
return image_features
def prepare_inputs_labels_for_multimodal(
self, input_ids, attention_mask, past_key_values, labels, images
):
vision_tower = self.get_vision_tower()
if vision_tower is None or images is None or input_ids.shape[1] == 1:
if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:
attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
return input_ids, attention_mask, past_key_values, None, labels
if type(images) is list or images.ndim == 5:
concat_images = torch.cat([image for image in images], dim=0)
image_features = self.encode_images(concat_images)
split_sizes = [image.shape[0] for image in images]
image_features = torch.split(image_features, split_sizes, dim=0)
image_features = [x.flatten(0, 1) for x in image_features]
else:
image_features = self.encode_images(images)
new_input_embeds = []
new_labels = [] if labels is not None else None
cur_image_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids):
| if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0: |
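A toy illustration of the placeholder-token bookkeeping the loop above performs: each IMAGE_TOKEN_INDEX occurrence marks where encoded image features get spliced into the text embedding sequence (LLaVA-style; the toy token ids below are made up):
import torch

IMAGE_TOKEN_INDEX = -200
cur_input_ids = torch.tensor([1, 15, IMAGE_TOKEN_INDEX, 42, 7])
num_image_tokens = (cur_input_ids == IMAGE_TOKEN_INDEX).sum()           # tensor(1)
splice_positions = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]   # tensor([2])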
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: kinggongzilla/ai-clone-whatsapp
# Path: configs/datasets.py
class custom_dataset:
# Path: configs/peft.py
class lora_config:
r: int=8
lora_alpha: int=32
target_modules: List[str] = field(default_factory=lambda: ["q_proj", "v_proj"])
bias= "none"
task_type: str= "CAUSAL_LM"
lora_dropout: float=0.05
inference_mode: bool = False
# Path: configs/peft.py
class llama_adapter_config:
adapter_len: int= 10
adapter_layers: int= 30
task_type: str= "CAUSAL_LM"
# Path: configs/peft.py
class prefix_config:
num_virtual_tokens: int=30
task_type: str= "CAUSAL_LM"
# Path: configs/training.py
class train_config:
whatsapp_username: str="" # your own whatsapp user name as it is in the chat .txt files
model_name: str="mistralai/Mistral-7B-Instruct-v0.2"
enable_fsdp: bool=False
low_cpu_fsdp: bool=False
run_validation: bool=False
batch_size_training: int=1
batching_strategy: str="packing" #alternative: padding
context_length: int=4096
gradient_accumulation_steps: int=1
gradient_clipping: bool = False
gradient_clipping_threshold: float = 1.0
num_epochs: int=1
num_workers_dataloader: int=1
lr: float=1e-4
weight_decay: float=0.0
gamma: float= 0.85
seed: int=42
use_fp16: bool=True
mixed_precision: bool=True
val_batch_size: int=1
dataset = "custom_dataset"
data_dir: str = "data/preprocessing/processed_chats"
peft_method: str = "lora" # None , llama_adapter, prefix
use_peft: bool=True
output_dir: str = "checkpoints"
freeze_layers: bool = False
num_freeze_layers: int = 1
quantization: bool = True
one_gpu: bool = False
save_model: bool = True
dist_checkpoint_root_folder: str="PATH/to/save/FSDP/model" # will be used if using FSDP
dist_checkpoint_folder: str="fine-tuned" # will be used if using FSDP
save_optimizer: bool=False # will be used if using FSDP
use_fast_kernels: bool = False # Enable using SDPA from PyTorch Accelerated Transformers, making use of Flash Attention and Xformers memory-efficient kernels
# Path: data/sampler.py
class LengthBasedBatchSampler(torch.utils.data.BatchSampler):
def __init__(self, data_source, batch_size: int, drop_last: bool, shuffle: bool=True) -> None:
if isinstance(next(iter(data_source)), dict):
first_key = next(iter(next(iter(data_source)).keys()))
self.lengths = [len(d[first_key]) for d in data_source]
else:
self.lengths = [len(d) for d in data_source]
self.batch_size = batch_size
self.drop_last = drop_last
self.shuffle = shuffle
def __iter__(self):
ids = np.argsort(self.lengths)
if self.drop_last:
ids = ids[:len(ids) // self.batch_size * self.batch_size]
batches = [ids[i:i+self.batch_size] for i in range(0, len(ids), self.batch_size)]
if self.shuffle:
random.shuffle(batches)
for b in batches:
yield b
def __len__(self):
if self.drop_last:
return len(self.lengths) // self.batch_size
else:
return len(self.lengths) // self.batch_size + (len(self.lengths) % self.batch_size > 0)
# Path: data/sampler.py
class DistributedLengthBasedBatchSampler(torch.utils.data.BatchSampler):
def __init__(self, data_source, batch_size: int, num_replicas: int, rank: int, shuffle: bool = True, seed: int = 0) -> None:
random.seed(seed)
self.batch_sampler = LengthBasedBatchSampler(
data_source, batch_size=batch_size, drop_last=True, shuffle=shuffle
)
self.num_replicas = num_replicas
self.rank = rank
def __iter__(self):
max_length = len(self.batch_sampler) // self.num_replicas * self.num_replicas
return islice(self.batch_sampler, self.rank, max_length, self.num_replicas)
def __len__(self):
return len(self.batch_sampler) // self.num_replicas
# Path: utils/dataset_utils.py
DATASET_PREPROC = {
"custom_dataset": get_custom_dataset,
}
# Path: utils/config_utils.py
import inspect
import torch.distributed as dist
from dataclasses import asdict
from torch.utils.data import DistributedSampler
from peft import (
LoraConfig,
AdaptionPromptConfig,
PrefixTuningConfig,
)
from transformers import default_data_collator
from transformers.data import DataCollatorForSeq2Seq
from configs import datasets, lora_config, llama_adapter_config, prefix_config, train_config
from data.sampler import LengthBasedBatchSampler, DistributedLengthBasedBatchSampler
from utils.dataset_utils import DATASET_PREPROC
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
def update_config(config, **kwargs):
if isinstance(config, (tuple, list)):
for c in config:
update_config(c, **kwargs)
else:
for k, v in kwargs.items():
if hasattr(config, k):
setattr(config, k, v)
elif "." in k:
# allow --some_config.some_param=True
config_name, param_name = k.split(".")
if type(config).__name__ == config_name:
if hasattr(config, param_name):
setattr(config, param_name, v)
else:
# In case of a specialized config we can warn the user
print(f"Warning: {config_name} does not accept parameter: {k}")
elif isinstance(config, train_config):
print(f"Warning: unknown parameter {k}")
def generate_peft_config(train_config, kwargs):
| configs = (lora_config, llama_adapter_config, prefix_config) |
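A likely continuation of generate_peft_config, based on the Meta llama-recipes code this file tracks (an assumption — only the ref line above is confirmed): the dataclass configs are zipped against the corresponding peft classes by name.
peft_configs = (LoraConfig, AdaptionPromptConfig, PrefixTuningConfig)
names = tuple(c.__name__.rstrip("_config") for c in configs)
assert train_config.peft_method in names, f"Peft config not found: {train_config.peft_method}"
config = configs[names.index(train_config.peft_method)]()
update_config(config, **kwargs)          # apply CLI overrides to the dataclass
params = asdict(config)
peft_config = peft_configs[names.index(train_config.peft_method)](**params)
return peft_config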
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: FoundationVision/UniRef
# Path: projects/UniRef/uniref/util/box_ops.py
def box_cxcywh_to_xyxy(x):
# print('box:\n', x)
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
# Path: projects/UniRef/uniref/util/box_ops.py
def generalized_box_iou(boxes1, boxes2):
"""
Generalized IoU from https://giou.stanford.edu/
The boxes should be in [x0, y0, x1, y1] format
Returns a [N, M] pairwise matrix, where N = len(boxes1)
and M = len(boxes2)
"""
# degenerate boxes gives inf / nan results
# so do an early check
assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
iou, union = box_iou(boxes1, boxes2)
lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
wh = (rb - lt).clamp(min=0) # [N,M,2]
area = wh[:, :, 0] * wh[:, :, 1]
return iou - (area - union) / (area+1e-7)
# Path: projects/UniRef/uniref/models/deformable_detr/matcher.py
import torch
import torch.nn.functional as F
import torchvision.ops as ops
from scipy.optimize import linear_sum_assignment
from torch import nn
from ...util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
# ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
class HungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(self,
cost_class: float = 1,
cost_bbox: float = 1,
cost_giou: float = 1):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0"
def forward_ota(self, outputs, targets):
""" simOTA for detr
"""
with torch.no_grad():
bs, num_queries = outputs["pred_logits"].shape[:2]
out_prob = outputs["pred_logits"].sigmoid()
out_bbox = outputs["pred_boxes"]  # skip the frame dimension
indices = []
matched_ids = []
for batch_idx in range(bs):
bz_boxes = out_bbox[batch_idx] #[300,4]
bz_out_prob = out_prob[batch_idx]
bz_tgt_ids = targets[batch_idx]["labels"]
num_insts = len(bz_tgt_ids)
bz_gtboxs = targets[batch_idx]['boxes'].reshape(num_insts,4) #[num_gt, 4]
fg_mask, is_in_boxes_and_center = \
self.get_in_boxes_info(bz_boxes,bz_gtboxs,expanded_strides=32)
pair_wise_ious = ops.box_iou(box_cxcywh_to_xyxy(bz_boxes), box_cxcywh_to_xyxy(bz_gtboxs))
# pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8)
# Compute the classification cost.
alpha = 0.25
gamma = 2.0
neg_cost_class = (1 - alpha) * (bz_out_prob ** gamma) * (-(1 - bz_out_prob + 1e-8).log())
pos_cost_class = alpha * ((1 - bz_out_prob) ** gamma) * (-(bz_out_prob + 1e-8).log())
cost_class = pos_cost_class[:, bz_tgt_ids] - neg_cost_class[:, bz_tgt_ids]
| cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(bz_boxes), box_cxcywh_to_xyxy(bz_gtboxs)) |
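A hand-worked check of generalized_box_iou from the box_ops excerpt above (assuming that module is importable; the +1e-7 in its denominator is negligible here):
import torch
# two 2x2 boxes in xyxy, offset by one unit: areas 4 and 4, intersection 1,
# union 7, smallest enclosing box 3x3 = 9
b1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
b2 = torch.tensor([[1.0, 1.0, 3.0, 3.0]])
# giou = iou - (enclosing - union)/enclosing = 1/7 - 2/9 ≈ -0.0794
print(generalized_box_iou(b1, b2))  # ≈ tensor([[-0.0794]])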
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: xhuangcv/humannorm
# Path: threestudio/models/materials/base.py
class BaseMaterial(BaseModule):
@dataclass
class Config(BaseModule.Config):
pass
cfg: Config
requires_normal: bool = False
requires_tangent: bool = False
def configure(self):
pass
def forward(self, *args, **kwargs) -> Float[Tensor, "*B 3"]:
raise NotImplementedError
def export(self, *args, **kwargs) -> Dict[str, Any]:
return {}
# Path: threestudio/models/networks.py
def get_encoding(n_input_dims: int, config) -> nn.Module:
# input is supposed to be in the range [0, 1]
encoding: nn.Module
if config.otype == "ProgressiveBandFrequency":
encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))
elif config.otype == "ProgressiveBandHashGrid":
encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))
else:
encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))
encoding = CompositeEncoding(
encoding,
include_xyz=config.get("include_xyz", False),
xyz_scale=2.0,
xyz_offset=-1.0,
) # FIXME: hard coded
return encoding
# Path: threestudio/models/networks.py
def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:
network: nn.Module
if config.otype == "VanillaMLP":
network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))
elif config.otype == "SphereInitVanillaMLP":
network = SphereInitVanillaMLP(
n_input_dims, n_output_dims, config_to_primitive(config)
)
else:
assert (
config.get("sphere_init", False) is False
), "sphere_init=True only supported by VanillaMLP"
network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))
return network
# Path: threestudio/utils/ops.py
def dot(x, y):
return torch.sum(x * y, -1, keepdim=True)
# Path: threestudio/utils/ops.py
def get_activation(name) -> Callable:
if name is None:
return lambda x: x
name = name.lower()
if name == "none":
return lambda x: x
elif name == "lin2srgb":
return lambda x: torch.where(
x > 0.0031308,
torch.pow(torch.clamp(x, min=0.0031308), 1.0 / 2.4) * 1.055 - 0.055,
12.92 * x,
).clamp(0.0, 1.0)
elif name == "exp":
return lambda x: torch.exp(x)
elif name == "shifted_exp":
return lambda x: torch.exp(x - 1.0)
elif name == "trunc_exp":
return trunc_exp
elif name == "shifted_trunc_exp":
return lambda x: trunc_exp(x - 1.0)
elif name == "sigmoid":
return lambda x: torch.sigmoid(x)
elif name == "tanh":
return lambda x: torch.tanh(x)
elif name == "shifted_softplus":
return lambda x: F.softplus(x - 1.0)
elif name == "scale_-11_01":
return lambda x: x * 0.5 + 0.5
else:
try:
return getattr(F, name)
except AttributeError:
raise ValueError(f"Unknown activation function: {name}")
# Path: threestudio/models/materials/neural_radiance_material.py
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import dot, get_activation
from threestudio.utils.typing import *
@threestudio.register("neural-radiance-material")
class NeuralRadianceMaterial(BaseMaterial):
@dataclass
class Config(BaseMaterial.Config):
input_feature_dims: int = 8
color_activation: str = "sigmoid"
dir_encoding_config: dict = field(
default_factory=lambda: {"otype": "SphericalHarmonics", "degree": 3}
)
mlp_network_config: dict = field(
default_factory=lambda: {
"otype": "FullyFusedMLP",
"activation": "ReLU",
"n_neurons": 16,
"n_hidden_layers": 2,
}
)
cfg: Config
def configure(self) -> None:
| self.encoding = get_encoding(3, self.cfg.dir_encoding_config) |
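configure() plausibly continues by wiring the direction encoding into the color MLP: the encoded view direction is concatenated with the input features and decoded to RGB. A sketch of the usual threestudio pattern; the exact lines are an assumption.
self.n_input_dims = self.cfg.input_feature_dims + self.encoding.n_output_dims
self.network = get_mlp(self.n_input_dims, 3, self.cfg.mlp_network_config)  # 3 = RGB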
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: jianchang512/stt
# Path: stslib/cfg.py
LANG = "en" if locale.getdefaultlocale()[0].split('_')[0].lower() != 'zh' else "zh"
ROOT_DIR = os.getcwd()
MODEL_DIR = os.path.join(ROOT_DIR, 'models')
STATIC_DIR = os.path.join(ROOT_DIR, 'static')
TMP_DIR = os.path.join(STATIC_DIR, 'tmp')
# Path: stslib/tool.py
def runffmpeg(arg):
def checkupdate():
def openweb(web_address):
def ms_to_time_string(*, ms=0, seconds=None):
# Path: stslib/cfg.py
ROOT_DIR = os.getcwd()
# Path: start.py
import logging
import re
import threading
import sys
import torch
import os
from flask import Flask, request, render_template, jsonify, send_from_directory
from gevent.pywsgi import WSGIServer, WSGIHandler, LoggingLogAdapter
from logging.handlers import RotatingFileHandler
from stslib import cfg, tool
from stslib.cfg import ROOT_DIR
from faster_whisper import WhisperModel
device = "cuda" if torch.cuda.is_available() else "cpu"
class CustomRequestHandler(WSGIHandler):
def log_request(self):
pass
# Configure logging
# Disable Werkzeug's default log handlers
log = logging.getLogger('werkzeug')
log.handlers[:] = []
log.setLevel(logging.WARNING)
app = Flask(__name__, static_folder=os.path.join(ROOT_DIR, 'static'), static_url_path='/static',
template_folder=os.path.join(ROOT_DIR, 'templates'))
root_log = logging.getLogger()  # Flask's root logger
root_log.handlers = []
root_log.setLevel(logging.WARNING)
# Configure logging
app.logger.setLevel(logging.WARNING)  # set the log level to WARNING
# Create a RotatingFileHandler, setting the log file path and size limit
file_handler = RotatingFileHandler(os.path.join(ROOT_DIR, 'sts.log'), maxBytes=1024 * 1024, backupCount=5)
# Define the log format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Set the file handler's level and format
file_handler.setLevel(logging.WARNING)
file_handler.setFormatter(formatter)
# Attach the file handler to the logger
app.logger.addHandler(file_handler)
@app.route('/static/<path:filename>')
def static_files(filename):
return send_from_directory(app.config['STATIC_FOLDER'], filename)
@app.route('/')
def index():
return render_template("index.html",
cuda=cfg.cuda,
lang_code=cfg.lang_code,
language=cfg.LANG,
root_dir=ROOT_DIR.replace('\\', '/'))
# Upload audio
@app.route('/upload', methods=['POST'])
def upload():
try:
# Get the uploaded file
audio_file = request.files['audio']
# If it is an mp4
noextname, ext = os.path.splitext(audio_file.filename)
ext = ext.lower()
# If it is a video, extract the audio first
wav_file = os.path.join(cfg.TMP_DIR, f'{noextname}.wav')
if os.path.exists(wav_file) and os.path.getsize(wav_file) > 0:
return jsonify({'code': 0, 'msg': cfg.transobj['lang1'], "data": os.path.basename(wav_file)})
msg = ""
if ext in ['.mp4', '.mov', '.avi', '.mkv', '.mpeg', '.mp3', '.flac']:
video_file = os.path.join(cfg.TMP_DIR, f'{noextname}{ext}')
audio_file.save(video_file)
params = [
"-i",
video_file,
]
if ext not in ['.mp3', '.flac']:
params.append('-vn')
params.append(wav_file)
| rs = tool.runffmpeg(params) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: jesenzhang/ComfyUI_StreamDiffusion
# Path: streamdiffusion/image_filter.py
class SimilarImageFilter:
def __init__(self, threshold: float = 0.98, max_skip_frame: float = 10) -> None:
self.threshold = threshold
self.prev_tensor = None
self.cos = torch.nn.CosineSimilarity(dim=0, eps=1e-6)
self.max_skip_frame = max_skip_frame
self.skip_count = 0
def __call__(self, x: torch.Tensor) -> Optional[torch.Tensor]:
if self.prev_tensor is None:
self.prev_tensor = x.detach().clone()
return x
else:
cos_sim = self.cos(self.prev_tensor.reshape(-1), x.reshape(-1)).item()
sample = random.uniform(0, 1)
if self.threshold >= 1:
skip_prob = 0
else:
skip_prob = max(0, 1 - (1 - cos_sim) / (1 - self.threshold))
# do not skip this frame
if skip_prob < sample:
self.prev_tensor = x.detach().clone()
return x
# skip this frame
else:
if self.skip_count > self.max_skip_frame:
self.skip_count = 0
self.prev_tensor = x.detach().clone()
return x
else:
self.skip_count += 1
return None
def set_threshold(self, threshold: float) -> None:
self.threshold = threshold
def set_max_skip_frame(self, max_skip_frame: float) -> None:
self.max_skip_frame = max_skip_frame
# Path: streamdiffusion/image_utils.py
def postprocess_image(
image: torch.Tensor,
output_type: str = "pil",
do_denormalize: Optional[List[bool]] = None,
) -> Union[torch.Tensor, np.ndarray, PIL.Image.Image]:
if not isinstance(image, torch.Tensor):
raise ValueError(
f"Input for postprocessing is in incorrect format: {type(image)}. We only support pytorch tensor"
)
if output_type == "latent":
return image
do_normalize_flg = True
if do_denormalize is None:
do_denormalize = [do_normalize_flg] * image.shape[0]
image = torch.stack(
[
denormalize(image[i]) if do_denormalize[i] else image[i]
for i in range(image.shape[0])
]
)
if output_type == "pt":
return image
image = pt_to_numpy(image)
if output_type == "np":
return image
if output_type == "pil":
return numpy_to_pil(image)
# Path: streamdiffusion/pipeline.py
import time
import numpy as np
import PIL.Image
import torch
from typing import List, Optional, Union, Any, Dict, Tuple, Literal
from diffusers import LCMScheduler, StableDiffusionPipeline
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import (
retrieve_latents,
)
from .image_filter import SimilarImageFilter
from .image_utils import postprocess_image
class StreamDiffusion:
def __init__(
self,
pipe: StableDiffusionPipeline,
t_index_list: List[int],
torch_dtype: torch.dtype = torch.float16,
width: int = 512,
height: int = 512,
do_add_noise: bool = True,
use_denoising_batch: bool = True,
frame_buffer_size: int = 1,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
) -> None:
self.device = pipe.device
self.dtype = torch_dtype
self.generator = None
self.height = height
self.width = width
self.latent_height = int(height // pipe.vae_scale_factor)
self.latent_width = int(width // pipe.vae_scale_factor)
self.frame_bff_size = frame_buffer_size
self.denoising_steps_num = len(t_index_list)
self.cfg_type = cfg_type
if use_denoising_batch:
self.batch_size = self.denoising_steps_num * frame_buffer_size
if self.cfg_type == "initialize":
self.trt_unet_batch_size = (
self.denoising_steps_num + 1
) * self.frame_bff_size
elif self.cfg_type == "full":
self.trt_unet_batch_size = (
2 * self.denoising_steps_num * self.frame_bff_size
)
else:
self.trt_unet_batch_size = self.denoising_steps_num * frame_buffer_size
else:
self.trt_unet_batch_size = self.frame_bff_size
self.batch_size = frame_buffer_size
self.t_list = t_index_list
self.do_add_noise = do_add_noise
self.use_denoising_batch = use_denoising_batch
self.similar_image_filter = False
| self.similar_filter = SimilarImageFilter() |
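A worked example of the batch-size bookkeeping above, using hypothetical values t_index_list = [0, 16, 32, 45] and frame_buffer_size = 1 with denoising batching enabled:
# denoising_steps_num = 4, so batch_size = 4 * 1 = 4
# cfg_type "self" or "none": trt_unet_batch_size = 4 * 1 = 4
# cfg_type "initialize":     trt_unet_batch_size = (4 + 1) * 1 = 5
# cfg_type "full":           trt_unet_batch_size = 2 * 4 * 1 = 8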
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: neobundy/MLX-Stable-Diffusion-WebUI
# Path: stable_diffusion/config.py
class DiffuserModelPathConfig:
class BaseConfig:
class AutoencoderConfig(BaseConfig):
class CLIPTextModelConfig(BaseConfig):
class UNetConfig(BaseConfig):
class DiffusionConfig(BaseConfig):
def __init__(self, model_path: str = "./diffuser_models"):
def unet_config(self):
def unet(self):
def scheduler(self):
def text_encoder_config(self):
def text_encoder(self):
def vae_config(self):
def vae(self):
def diffusion_config(self):
def tokenizer_vocab(self):
def tokenizer_merges(self):
def __getitem__(self, key):
def __setitem__(self, key, value):
# Path: stable_diffusion/model_io.py
_DEBUG = False
def _debug_print(*args, **kwargs):
def _from_numpy(x):
def map_unet_weights(key, value):
def map_clip_text_encoder_weights(key, value):
def map_vae_weights(key, value):
def _flatten(params):
def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False):
def _check_key(key: str, part: str):
def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
def load_text_encoder(key: str = _DEFAULT_MODEL, float16: bool = False):
def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False):
def load_diffusion_config(key: str = _DEFAULT_MODEL):
def load_tokenizer(key: str = _DEFAULT_MODEL):
def load_unet_local(weights_path: str, config_path: str, float16: bool = False):
def load_text_encoder_local(weights_path: str, config_path: str, float16: bool = False):
def load_autoencoder_local(weights_path: str, config_path: str, float16: bool = False):
def load_diffusion_config_local(config_path:str):
def load_tokenizer_local(vocab_path: str, merges_path: str):
def load_diffuser_model(diffuser_model_path: str, float16: bool = False):
# Path: utils.py
def _state_dict(model):
"""Return the model's state_dict as a dictionary."""
state_dict = {}
for name, param in model.parameters().items():
state_dict[name] = param
return state_dict
# Path: utils.py
def get_state_dict_from_safetensor(checkpoint_path: str):
"""Return the state_dict from the checkpoint."""
state_dict = {}
with safetensor_open(checkpoint_path, framework="numpy") as f:
# Access the data in the file
for key in f.keys():
tensor = f.get_tensor(key)
state_dict[key] = tensor
return state_dict
# Path: model_inspector.py
from stable_diffusion.config import PathConfig
from stable_diffusion.model_io import preload_models_from_safetensor_weights
from utils import _state_dict
from utils import get_state_dict_from_safetensor
INSPECTION_FILE = "model_inspection.txt"
NUM_ITEMS = 100
MODEL_FILE = "./models/v2-1_512-ema-pruned.safetensors"
MODEL_FILE1 = "./unet/diffusion_pytorch_model_test.safetensors"
MODEL_FILE2 = "./unet/xxmix9realistic_v40.safetensors"
# Recreate the inspection file at every execution of the script
with open(INSPECTION_FILE, 'w') as f:
pass
def write_to_file(*args, **kwargs):
"""Write the text to the inspection file."""
# Convert the arguments to a string
message = ' '.join(map(str, args))
# Print the message to the console
print(message, **kwargs)
# Open the log file in append mode and write the message
with open(INSPECTION_FILE, 'a') as f:
f.write(message + '\n')
def inspect_model(path_config: PathConfig, keys_only=True):
"""Inspect the contents of the models."""
# Load the models using the provided config and weights paths
unet_model = load_unet_local(path_config.unet_config, MODEL_FILE)
text_encoder_model = load_text_encoder_local(MODEL_FILE)
autoencoder_model = load_autoencoder_local(MODEL_FILE)
diffusion_config = load_diffusion_config_local(path_config.diffusion_config)
tokenizer = load_tokenizer_local(path_config.tokenizer_vocab, path_config.tokenizer_merges)
# Convert the models' state_dict to a dictionary and iterate over it
for model_name, model in zip(["unet", "text_encoder", "autoencoder"], [unet_model, text_encoder_model, autoencoder_model]):
write_to_file("-" * 50)
write_to_file(f"Model: {model_name}")
write_to_file("-" * 50)
| for key, value in _state_dict(model).items(): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ffmemes/ff-backend
# Path: src/database.py
DATABASE_URL = str(settings.DATABASE_URL)
async def fetch_one(select_query: Select | Insert | Update) -> dict[str, Any] | None:
async def fetch_all(select_query: Select | Insert | Update) -> list[dict[str, Any]]:
async def execute(select_query: Insert | Update) -> CursorResult:
# Path: src/storage/parsers/schemas.py
class TgChannelPostParsingResult(CustomModel):
post_id: int
url: str
content: str | None = None # post text
media: list[dict] | None = None
views: int
date: datetime
mentions: list[str] | None = None # mentioned usernames
hashtags: list[str] | None = None
forwarded: dict | None = None
forwarded_url: str | None = None # url to forwarded post
link_preview: dict | None = None
out_links: list[str] | None = None
# Path: src/storage/parsers/schemas.py
class VkGroupPostParsingResult(CustomModel):
post_id: str
url: str
content: str | None = None # post text
media: list[str]
date: datetime
views: int
likes: int
reposts: int
comments: int
# Path: src/storage/constants.py
class MemeSourceType(str, Enum):
TELEGRAM = "telegram"
VK = "vk"
REDDIT = "reddit"
INSTAGRAM = "instagram"
TWITTER = "twitter"
TIKTOK = "tiktok"
USER_UPLOAD = "user upload"
# Path: src/storage/constants.py
class MemeSourceStatus(str, Enum):
IN_MODERATION = "in_moderation"
PARSING_ENABLED = "parsing_enabled"
PARSING_DISABLED = "parsing_disabled"
# Path: src/storage/constants.py
class MemeType(str, Enum):
IMAGE = "image"
ANIMATION = "animation"
VIDEO = "video"
# Path: src/storage/constants.py
class MemeStatus(str, Enum):
CREATED = "created"
OK = "ok"
DUPLICATE = "duplicate"
AD = "ad"
BROKEN_CONTENT_LINK = "broken_content_link"
# TODO: more statuses?
# IN_MODERATION = "in_moderation"
# Path: src/storage/constants.py
MEME_RAW_TELEGRAM_MEME_SOURCE_POST_UNIQUE_CONSTRAINT = "meme_raw_telegram_meme_source_id_post_id_key"
# Path: src/storage/constants.py
MEME_RAW_VK_MEME_SOURCE_POST_UNIQUE_CONSTRAINT = "meme_raw_vk_meme_source_id_post_id_key"
# Path: src/storage/service.py
from typing import Any
from datetime import datetime
from sqlalchemy import select, nulls_first, text
from sqlalchemy.dialects.postgresql import insert
from src.database import (
language,
meme,
meme_source,
meme_raw_telegram,
meme_raw_vk,
execute, fetch_one, fetch_all,
)
from src.storage.parsers.schemas import TgChannelPostParsingResult, VkGroupPostParsingResult
from src.storage.constants import (
MemeSourceType,
MemeSourceStatus,
MemeType,
MemeStatus,
MEME_RAW_TELEGRAM_MEME_SOURCE_POST_UNIQUE_CONSTRAINT,
MEME_RAW_VK_MEME_SOURCE_POST_UNIQUE_CONSTRAINT,
)
async def insert_parsed_posts_from_telegram(
meme_source_id: int,
telegram_posts: list[TgChannelPostParsingResult],
) -> None:
posts = [
post.model_dump() | {"meme_source_id": meme_source_id}
for post in telegram_posts
]
insert_statement = insert(meme_raw_telegram).values(posts)
insert_posts_query = insert_statement.on_conflict_do_update(
constraint=MEME_RAW_TELEGRAM_MEME_SOURCE_POST_UNIQUE_CONSTRAINT,
set_={
"media": insert_statement.excluded.media,
"views": insert_statement.excluded.views,
"updated_at": datetime.utcnow(),
},
)
await execute(insert_posts_query)
async def insert_parsed_posts_from_vk(
meme_source_id: int,
vk_posts: list[VkGroupPostParsingResult],
) -> None:
posts = [
post.model_dump() | {"meme_source_id": meme_source_id}
for post in vk_posts
]
insert_statement = insert(meme_raw_vk).values(posts)
insert_posts_query = insert_statement.on_conflict_do_update(
constraint=MEME_RAW_VK_MEME_SOURCE_POST_UNIQUE_CONSTRAINT,
set_={
"media": insert_statement.excluded.media,
"views": insert_statement.excluded.views,
"likes": insert_statement.excluded.likes,
"reposts": insert_statement.excluded.reposts,
"comments": insert_statement.excluded.comments,
"updated_at": datetime.utcnow(),
},
)
await execute(insert_posts_query)
async def get_telegram_sources_to_parse(limit=10) -> list[dict[str, Any]]:
select_query = (
select(meme_source)
| .where(meme_source.c.type == MemeSourceType.TELEGRAM) |
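The telegram upsert above compiles, roughly, to the following PostgreSQL (column list abbreviated; an approximation, not verbatim dialect output):
# INSERT INTO meme_raw_telegram (meme_source_id, post_id, ...) VALUES (...)
# ON CONFLICT ON CONSTRAINT meme_raw_telegram_meme_source_id_post_id_key
# DO UPDATE SET media = excluded.media, views = excluded.views, updated_at = ...;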
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Con6924/SPM
# Path: src/misc/clip_templates.py
# Path: src/engine/train_util.py
def encode_prompts(
tokenizer: CLIPTokenizer,
text_encoder: CLIPTokenizer,
prompts: list[str],
return_tokens: bool = False,
):
text_tokens = text_tokenize(tokenizer, prompts)
text_embeddings = text_encode(text_encoder, text_tokens)
if return_tokens:
return text_embeddings, torch.unique(text_tokens, dim=1)
return text_embeddings
# Path: src/configs/prompt.py
from typing import Literal, Optional, Union
from pathlib import Path
from pydantic import BaseModel, root_validator
from transformers import CLIPTextModel, CLIPTokenizer
from src.misc.clip_templates import imagenet_templates
from src.engine.train_util import encode_prompts
import yaml
import pandas as pd
import random
import torch
class PromptEmbedsXL:
text_embeds: torch.FloatTensor
pooled_embeds: torch.FloatTensor
def __init__(self, embeds) -> None:
self.text_embeds, self.pooled_embeds = embeds
PROMPT_EMBEDDING = Union[torch.FloatTensor, PromptEmbedsXL]
class PromptEmbedsCache:
prompts: dict[str, PROMPT_EMBEDDING] = {}
def __setitem__(self, __name: str, __value: PROMPT_EMBEDDING) -> None:
self.prompts[__name] = __value
def __getitem__(self, __name: str) -> Optional[PROMPT_EMBEDDING]:
if __name in self.prompts:
return self.prompts[__name]
else:
return None
class PromptSettings(BaseModel): # yaml
target: str
positive: str = None # if None, target will be used
unconditional: str = "" # default is ""
neutral: str = None # if None, unconditional will be used
action: ACTION_TYPES = "erase" # default is "erase"
guidance_scale: float = 1.0 # default is 1.0
resolution: int = 512 # default is 512
dynamic_resolution: bool = False # default is False
batch_size: int = 1 # default is 1
dynamic_crops: bool = False # default is False. only used when model is XL
use_template: bool = False # default is False
la_strength: float = 1000.0
sampling_batch_size: int = 4
seed: int = None
case_number: int = 0
@root_validator(pre=True)
def fill_prompts(cls, values):
keys = values.keys()
if "target" not in keys:
raise ValueError("target must be specified")
if "positive" not in keys:
values["positive"] = values["target"]
if "unconditional" not in keys:
values["unconditional"] = ""
if "neutral" not in keys:
values["neutral"] = values["unconditional"]
return values
class PromptEmbedsPair:
target: PROMPT_EMBEDDING # the concept that do not want to generate
positive: PROMPT_EMBEDDING # generate the concept
unconditional: PROMPT_EMBEDDING # uncondition (default should be empty)
neutral: PROMPT_EMBEDDING # base condition (default should be empty)
use_template: bool = False # use clip template or not
guidance_scale: float
resolution: int
dynamic_resolution: bool
batch_size: int
dynamic_crops: bool
loss_fn: torch.nn.Module
action: ACTION_TYPES
def __init__(
self,
loss_fn: torch.nn.Module,
target: PROMPT_EMBEDDING,
positive: PROMPT_EMBEDDING,
unconditional: PROMPT_EMBEDDING,
neutral: PROMPT_EMBEDDING,
settings: PromptSettings,
) -> None:
self.loss_fn = loss_fn
self.target = target
self.positive = positive
self.unconditional = unconditional
self.neutral = neutral
self.settings = settings
self.use_template = settings.use_template
self.guidance_scale = settings.guidance_scale
self.resolution = settings.resolution
self.dynamic_resolution = settings.dynamic_resolution
self.batch_size = settings.batch_size
self.dynamic_crops = settings.dynamic_crops
self.action = settings.action
self.la_strength = settings.la_strength
self.sampling_batch_size = settings.sampling_batch_size
def _prepare_embeddings(
self,
cache: PromptEmbedsCache,
tokenizer: CLIPTokenizer,
text_encoder: CLIPTextModel,
):
"""
Prepare embeddings for training. When use_template is True, the embeddings will be
formatted using a template, and then be processed by the model.
"""
if not self.use_template:
return
template = random.choice(imagenet_templates)
target_prompt = template.format(self.settings.target)
if cache[target_prompt]:
self.target = cache[target_prompt]
else:
| self.target = encode_prompts(tokenizer, text_encoder, [target_prompt]) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: dakpinaroglu/Frame2seq
# Path: frame2seq/utils/residue_constants.py
def load_stereo_chemical_props() -> Tuple[Mapping[str, List[Bond]],
def make_bond_key(atom1_name, atom2_name):
def sequence_to_onehot(
sequence: str,
mapping: Mapping[str, int],
) -> np.ndarray:
def _make_standard_atom_mask() -> np.ndarray:
def _make_rigid_transformation_4x4(ex, ey, translation):
AA_TO_ID = {
'A': 0,
'C': 1,
'D': 2,
'E': 3,
'F': 4,
'G': 5,
'H': 6,
'I': 7,
'K': 8,
'L': 9,
'M': 10,
'N': 11,
'P': 12,
'Q': 13,
'R': 14,
'S': 15,
'T': 16,
'V': 17,
'W': 18,
'Y': 19,
'X': 20,
}
ID_TO_AA = {
0: 'A',
1: 'C',
2: 'D',
3: 'E',
4: 'F',
5: 'G',
6: 'H',
7: 'I',
8: 'K',
9: 'L',
10: 'M',
11: 'N',
12: 'P',
13: 'Q',
14: 'R',
15: 'S',
16: 'T',
17: 'V',
18: 'W',
19: 'Y',
20: 'X',
}
STANDARD_ATOM_MASK = _make_standard_atom_mask()
# Path: frame2seq/utils/util.py
def get_neg_pll(probs, seq):
seq_probs = torch.gather(probs, 1, seq.unsqueeze(-1)).squeeze(-1)
neg_pll = -1 * torch.log(seq_probs)
avg_neg_pll = neg_pll.sum().item() / len(neg_pll)
return neg_pll, avg_neg_pll
# Path: frame2seq/utils/util.py
def read_fasta_file(fasta_file):
"""
Read a fasta file and return a list of sequences.
"""
with open(fasta_file, 'r') as f:
lines = f.readlines()
sequences = []
for line in lines:
if line[0] == '>':
sequences.append(lines[lines.index(line) + 1].strip())
return sequences
# Path: frame2seq/utils/pdb2input.py
def get_inference_inputs(pdb_file, chain_id):
atom_positions, aatype, seq_mask = get_parsed_inputs(pdb_file, chain_id)
seq_mask = seq_mask.unsqueeze(0)
aatype = torch.from_numpy(aatype)
aatype = aatype.unsqueeze(0)
X = atom_positions
X = X.unsqueeze(0)
return seq_mask, aatype, X
# Path: frame2seq/utils/pred2output.py
def output_csv(preds, csv_dir):
"""
Given average negative pseudo-log-likelihoods, write to a csv file.
"""
df = pd.DataFrame(columns=[
'PDBID', 'Chain ID', 'Sample Number', 'Scored sequence',
'Average negative pseudo-log-likelihood', 'Temperature'
],
data=preds)
df.to_csv(f"{csv_dir}/scores.csv", index=False)
# Path: frame2seq/utils/pred2output.py
def output_indiv_csv(scores, csv_dir):
"""
Given per-residue negative pseudo-log-likelihoods, write to a csv file.
"""
pdbid = scores['pdbid']
chain = scores['chain']
sample = scores['sample']
res_idx = scores['res_idx']
neg_pll = scores['neg_pll']
df = pd.DataFrame(
list(zip(res_idx, neg_pll)),
columns=['Residue index', 'Negative pseudo-log-likelihood'])
df.to_csv(f"{csv_dir}/{pdbid}_{chain}_seq{sample}.csv", index=False)
# Path: frame2seq/utils/score.py
import os
import torch
from tqdm import tqdm
from frame2seq.utils import residue_constants
from frame2seq.utils.util import get_neg_pll, read_fasta_file
from frame2seq.utils.pdb2input import get_inference_inputs
from frame2seq.utils.pred2output import output_csv, output_indiv_csv
def score(self, pdb_file, chain_id, fasta_file, save_indiv_neg_pll):
temperature = 1.0
seq_mask, aatype, X = get_inference_inputs(pdb_file, chain_id)
seq_mask = seq_mask.to(self.device)
aatype = aatype.to(self.device)
X = X.to(self.device)
str_form = [residue_constants.ID_TO_AA[int(i)] for i in aatype[0]]
input_aatype_onehot = residue_constants.sequence_to_onehot(
sequence=str_form,
mapping=residue_constants.AA_TO_ID,
)
input_aatype_onehot = torch.from_numpy(input_aatype_onehot).float()
input_aatype_onehot = input_aatype_onehot.unsqueeze(0)
input_aatype_onehot = input_aatype_onehot.to(self.device)
input_aatype_onehot = torch.zeros_like(input_aatype_onehot)
input_aatype_onehot[:, :,
20] = 1 # all positions are masked (set to unknown)
scores, preds = {}, []
with torch.no_grad():
pred_seq1 = self.models[0].forward(X, seq_mask, input_aatype_onehot)
pred_seq2 = self.models[1].forward(X, seq_mask, input_aatype_onehot)
pred_seq3 = self.models[2].forward(X, seq_mask, input_aatype_onehot)
pred_seq = (pred_seq1 + pred_seq2 + pred_seq3) / 3 # ensemble
pred_seq = pred_seq / temperature
pred_seq = torch.nn.functional.softmax(pred_seq, dim=-1)
pred_seq = pred_seq[seq_mask]
if fasta_file is not None:
| input_seqs = read_fasta_file(fasta_file) |
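
As a quick, self-contained check of the scoring math used by get_neg_pll above: gather each position's predicted probability for the true token, then take the negative log.

import torch

probs = torch.tensor([[0.7, 0.2, 0.1],
                      [0.1, 0.8, 0.1]])   # [length, vocab]
seq = torch.tensor([0, 1])                # true residue ids
seq_probs = torch.gather(probs, 1, seq.unsqueeze(-1)).squeeze(-1)
neg_pll = -1 * torch.log(seq_probs)       # per-residue scores
avg_neg_pll = neg_pll.sum().item() / len(neg_pll)
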
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: davep/oshit
# Path: oshit/app/data/config.py
@lru_cache(maxsize=None)
def load_configuration() -> Configuration:
"""Load the configuration.
Returns:
The configuration.
Note:
As a side-effect, if the configuration doesn't exist a default one
will be saved to storage.
This function is designed so that it's safe and low-cost to
repeatedly call it. The configuration is cached and will only be
loaded from storage when necessary.
"""
source = configuration_file()
return (
Configuration(**loads(source.read_text(encoding="utf-8")))
if source.exists()
else save_configuration(Configuration())
)
# Path: oshit/app/data/config.py
def save_configuration(configuration: Configuration) -> Configuration:
"""Save the given configuration.
Args:
The configuration to store.
Returns:
The configuration.
"""
load_configuration.cache_clear()
configuration_file().write_text(
dumps(asdict(configuration), indent=4), encoding="utf-8"
)
return load_configuration()
# Path: oshit/app/screens/main.py
class Main(Screen[None]):
"""The main screen of the application."""
CONTEXT_HELP = """
## Application keys
| Key | Description |
| - | - |
| <kbd>F1</kbd> | This help screen. |
| <kbd>F2</kbd> | Toggle compact/relaxed display. |
| <kbd>F3</kbd> | Toggle dark/light mode. |
| <kbd>F12</kbd> | Quit the application. |
| <kbd>t</kbd> | View the top stories. |
| <kbd>n</kbd> | View the new stories. |
| <kbd>b</kbd> | View the best stories. |
| <kbd>a</kbd> | View the AskHN stories. |
| <kbd>s</kbd> | View the ShowHN stories. |
| <kbd>j</kbd> | View the jobs. |
"""
CSS = """
TabbedContent, LoadingIndicator {
background: $panel;
}
"""
TITLE = f"Orange Site Hit v{__version__}"
BINDINGS = [
Binding("f1", "help", "Help"),
Binding("f2", "compact", "Compact/Relaxed"),
Binding("f3", "toggle_dark"),
Binding("f12", "quit", "Quit"),
Binding("t", "go('top')"),
Binding("n", "go('new')"),
Binding("b", "go('best')"),
Binding("a", "go('ask')"),
Binding("s", "go('show')"),
Binding("j", "go('jobs')"),
Binding("down, enter", "pane"),
]
def __init__(self) -> None:
"""Initialise the screen."""
super().__init__()
config = load_configuration()
self._hn = HN(
max_concurrency=config.maximum_concurrency,
timeout=config.connection_timeout,
)
"""The HackerNews client object."""
def compose(self) -> ComposeResult:
"""Compose the main screen's layout."""
yield Header()
with HackerNews():
yield Items("top", "t", self._hn.top_stories)
yield Items("new", "n", self._hn.new_stories)
yield Items("best", "b", self._hn.best_stories)
yield Items("ask", "a", self._hn.latest_ask_stories)
yield Items("show", "s", self._hn.latest_show_stories)
yield Items("jobs", "j", self._hn.latest_job_stories)
yield Footer()
def _refresh_subtitle(self) -> None:
"""Refresh the subtitle of the screen."""
self.sub_title = self.query_one(HackerNews).description
def on_mount(self) -> None:
"""Configure things once the DOM is ready."""
self.set_interval(0.95, self._refresh_subtitle)
def action_help(self) -> None:
"""Show the help screen."""
self.app.push_screen(Help(self))
def action_go(self, items: str) -> None:
"""Go to the given list of items.
Args:
items: The name of the list of items to go to.
"""
self.query_one(HackerNews).active = items
self.query_one(HackerNews).focus_active_pane()
def action_compact(self) -> None:
"""Toggle the compact display."""
news = self.query_one(HackerNews)
news.compact = not news.compact
@on(ShowUser)
def show_user(self, event: ShowUser) -> None:
"""Handle a request to show the details of a user."""
self.app.push_screen(UserDetails(self._hn, event.user))
@on(ShowComments)
def show_comments(self, event: ShowComments) -> None:
"""Handle a request to show the comments for an article."""
self.app.push_screen(Comments(self._hn, event.article))
# Path: oshit/app/oshit.py
from textual.app import App
from .data import load_configuration, save_configuration
from .screens import Main
"""The main application class."""
##############################################################################
# Textual imports.
##############################################################################
# Local imports.
##############################################################################
class OSHit(App[None]):
"""The Orange Site Hit application."""
ENABLE_COMMAND_PALETTE = False
def __init__(self) -> None:
"""Initialise the application."""
super().__init__()
self.dark = load_configuration().dark_mode
def on_mount(self) -> None:
"""Get things going once the app is up and running."""
| self.push_screen(Main()) |
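
The completed line is the standard Textual start-up pattern: build the main screen in on_mount and push it. A minimal runnable sketch, assuming the textual package is installed (Screen imported from textual.screen):

from textual.app import App
from textual.screen import Screen

class Main(Screen):
    pass

class SketchApp(App):
    def on_mount(self) -> None:
        # Push the main screen once the app is mounted.
        self.push_screen(Main())

if __name__ == "__main__":
    SketchApp().run()
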
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Maximilian-Winter/llama-cpp-agent
# Path: src/llama_cpp_agent/function_calling.py
class LlamaCppFunctionTool:
def __init__(self, pydantic_model: Type[BaseModel], has_markdown_code_block=False, has_triple_quoted_string=False,
**additional_parameters):
self.model = pydantic_model
self.look_for_field_string = has_markdown_code_block or has_triple_quoted_string
self.has_markdown_code_block = has_markdown_code_block
self.has_triple_quoted_string = has_triple_quoted_string
self.additional_parameters = additional_parameters if additional_parameters else {}
def __call__(self, *args, **kwargs):
return self.model(**kwargs)
# Path: src/llama_cpp_agent/agent_memory/core_memory_manager.py
class CoreMemoryManager:
def __init__(self, core_memory: dict):
self.core_memory = core_memory
def add_to_core_memory(self, key: str, child_key: str, value) -> str:
"""
Adds or updates an entry in the core memory.
"""
if key not in self.core_memory:
self.core_memory[key] = {}
self.core_memory[key][child_key] = value
return f"Core memory updated. Key: {key}, Child Key: {child_key}"
def replace_in_core_memory(self, key: str, child_key: str, new_value) -> str:
"""
Replaces an existing entry in the core memory.
"""
if key in self.core_memory and child_key in self.core_memory[key]:
self.core_memory[key][child_key] = new_value
return f"Core memory replaced. Key: {key}, Child Key: {child_key}"
else:
return "Key or child key not found in core memory."
def remove_from_core_memory(self, key: str, child_key: str) -> str:
"""
Removes a specific field from a core memory entry.
"""
if key in self.core_memory and child_key in self.core_memory[key]:
del self.core_memory[key][child_key]
return f"Core memory entry removed. Key: {key}, Child Key: {child_key}"
else:
return "Key or child key not found in core memory."
def build_core_memory_context(self):
output = json.dumps(self.core_memory, indent=4)
context = f"# Core-Memory:\n{output if output != '{}' else 'Empty'}"
return context
def load(self, file_path):
with open(file_path, 'r', encoding='utf-8') as file:
self.core_memory = json.load(file)
def save(self, file_path):
with open(file_path, 'w', encoding='utf-8') as file:
json.dump(self.core_memory, file, indent=4)
# Path: src/llama_cpp_agent/agent_memory/retrieval_memory_manager.py
class RetrievalMemoryManager:
def __init__(self, retrieval_memory: RetrievalMemory):
def add_memory_to_retrieval(self, description: str, importance: float = 1.0) -> str:
def retrieve_memories(self, query: str, max_results: int = 5) -> str:
# Path: src/llama_cpp_agent/agent_memory/memory_tools.py
from pydantic import BaseModel, Field
from ..function_calling import LlamaCppFunctionTool
from .core_memory_manager import CoreMemoryManager
from .retrieval_memory_manager import RetrievalMemoryManager, RetrievalMemory
class AddCoreMemory(BaseModel):
"""
Add a new entry to the core memory.
"""
key: str = Field(..., description="The key identifier for the core memory entry.")
field: str = Field(..., description="A secondary key or field within the core memory entry.")
value: str = Field(..., description="The value or data to be stored in the specified core memory entry.")
def run(self, core_memory_manager: CoreMemoryManager):
return core_memory_manager.add_to_core_memory(self.key, self.field, self.value)
# Replace Core Memory Model
class ReplaceCoreMemory(BaseModel):
"""
Replace an entry in the core memory.
"""
key: str = Field(..., description="The key identifier for the core memory entry.")
field: str = Field(..., description="The specific field within the core memory entry to be replaced.")
new_value: str = Field(...,
description="The new value to replace the existing data in the specified core memory field.")
def run(self, core_memory_manager: CoreMemoryManager):
return core_memory_manager.replace_in_core_memory(self.key, self.field, self.value)
class RemoveCoreMemory(BaseModel):
"""
Remove an entry in the core memory.
"""
key: str = Field(..., description="The key identifier for the core memory entry to be removed.")
field: str = Field(..., description="The specific field within the core memory entry to be removed.")
def run(self, core_memory_manager: CoreMemoryManager):
return core_memory_manager.remove_from_core_memory(self.key, self.field)
class RetrieveMemories(BaseModel):
"""
Retrieve memories from the retrieval memory based on a query.
"""
query: str = Field(..., description="The query to be used to retrieve memories from the retrieval memory.")
def run(self, retrieval_memory_manager: RetrievalMemoryManager):
return retrieval_memory_manager.retrieve_memories(self.query)
class AddRetrievalMemory(BaseModel):
"""
Add memory to the retrieval memory.
"""
memory: str = Field(..., description="The memory to be added to the retrieval memory.")
importance: float = Field(..., description="The importance of the memory to be added to the retrieval memory.")
def run(self, retrieval_memory_manager: RetrievalMemoryManager):
return retrieval_memory_manager.add_memory_to_retrieval(self.memory, self.importance)
class AgentRetrievalMemory:
def __init__(self, persistent_db_path="./retrieval_memory", embedding_model_name="all-MiniLM-L6-v2",
collection_name="retrieval_memory_collection"):
| self.retrieval_memory = RetrievalMemory(persistent_db_path, embedding_model_name, collection_name) |
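
The tool classes above share one shape: a pydantic model whose fields are the tool's arguments and whose run() applies them to a manager object. The same pattern in miniature (the Counter manager is a toy, not the repo's memory managers):

from pydantic import BaseModel, Field

class Counter:
    def __init__(self) -> None:
        self.total = 0

    def add(self, amount: int) -> str:
        self.total += amount
        return f"total is now {self.total}"

class AddTool(BaseModel):
    amount: int = Field(..., description="How much to add.")

    def run(self, manager: Counter) -> str:
        # Apply the validated arguments to the manager, like the tools above.
        return manager.add(self.amount)

print(AddTool(amount=3).run(Counter()))  # -> total is now 3
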
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: tedivm/paracelsus
# Path: paracelsus/transformers/dot.py
class Dot:
comment_format: str = "dot"
metadata: MetaData
graph: pydot.Dot
def __init__(self, metaclass: MetaData) -> None:
self.metadata = metaclass
self.graph = pydot.Dot("database", graph_type="graph")
for table in self.metadata.tables.values():
node = pydot.Node(name=table.name)
node.set_label(self._table_label(table))
node.set_shape("none")
node.set_margin("0")
self.graph.add_node(node)
for column in table.columns:
for foreign_key in column.foreign_keys:
key_parts = foreign_key.target_fullname.split(".")
left_table = key_parts[0]
left_column = key_parts[1]
edge = pydot.Edge(left_table, table.name)
edge.set_label(column.name)
edge.set_dir("both")
edge.set_arrowhead("none")
if not column.unique:
edge.set_arrowhead("crow")
l_column = self.metadata.tables[left_table].columns[left_column]
edge.set_arrowtail("none")
if not l_column.unique and not l_column.primary_key:
edge.set_arrowtail("crow")
self.graph.add_edge(edge)
def _table_label(self, table: Table) -> str:
column_output = ""
columns = sorted(table.columns, key=utils.column_sort_key)
for column in columns:
attributes = set([])
if column.primary_key:
attributes.add("Primary Key")
if len(column.foreign_keys) > 0:
attributes.add("Foreign Key")
if column.unique:
attributes.add("Unique")
column_output += f' <tr><td align="left">{column.type}</td><td align="left">{column.name}</td><td>{", ".join(sorted(attributes))}</td></tr>\n'
return f"""<
<table border="0" cellborder="1" cellspacing="0" cellpadding="4">
<tr><td colspan="3" bgcolor="lightblue"><b>{table.name}</b></td></tr>
{column_output.rstrip()}
</table>
>"""
def __str__(self) -> str:
return self.graph.to_string()
# Path: paracelsus/transformers/mermaid.py
class Mermaid:
comment_format: str = "mermaid"
metadata: MetaData
def __init__(self, metaclass: MetaData) -> None:
self.metadata = metaclass
def _table(self, table: Table) -> str:
output = f"\t{table.name}"
output += " {\n"
columns = sorted(table.columns, key=utils.column_sort_key)
for column in columns:
output += self._column(column)
output += "\t}\n\n"
return output
def _column(self, column: Column) -> str:
column_str = f"{column.type} {column.name}"
if column.primary_key:
if len(column.foreign_keys) > 0:
column_str += " PK,FK"
else:
column_str += " PK"
elif len(column.foreign_keys) > 0:
column_str += " FK"
options = []
if column.nullable:
options.append("nullable")
if column.unique:
options.append("unique")
if column.index:
options.append("indexed")
if len(options) > 0:
column_str += f' "{",".join(options)}"'
return f"\t\t{column_str}\n"
def _relationships(self, column: Column) -> str:
output = ""
column_name = column.name
right_table = column.table.name
if column.unique:
right_operand = "o|"
else:
right_operand = "o{"
for foreign_key in column.foreign_keys:
key_parts = foreign_key.target_fullname.split(".")
left_table = key_parts[0]
left_column = key_parts[1]
left_operand = ""
lcolumn = self.metadata.tables[left_table].columns[left_column]
if lcolumn.unique or lcolumn.primary_key:
left_operand = "||"
else:
left_operand = "}o"
output += f"\t{left_table} {left_operand}--{right_operand} {right_table} : {column_name}\n"
return output
def __str__(self) -> str:
output = "erDiagram\n"
for table in self.metadata.tables.values():
output += self._table(table)
for table in self.metadata.tables.values():
for column in table.columns.values():
if len(column.foreign_keys) > 0:
output += self._relationships(column)
return output
# Path: paracelsus/cli.py
import importlib
import re
import sys
import typer
from enum import Enum
from pathlib import Path
from typing import List
from typing_extensions import Annotated
from .transformers.dot import Dot
from .transformers.mermaid import Mermaid
from . import _version
app = typer.Typer()
transformers = {
"mmd": Mermaid,
"mermaid": Mermaid,
| "dot": Dot, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: winniesi/tg-gemini-bot
# Path: api/auth.py
def is_authorized(from_id: int, user_name: str) -> bool:
if str(user_name) in ALLOWED_USERS:
return True
return False
# Path: api/context.py
class ChatManager:
"""setting up a basic conversation storage manager"""
def __init__(self):
self.chats: Dict[str, ChatConversation] = {}
def _new_chat(self, username: str) -> ChatConversation:
chat = ChatConversation()
self.chats[username] = chat
return chat
def get_chat(self, username: str) -> ChatConversation:
if self.chats.get(username) is None:
return self._new_chat(username)
return self.chats[username]
# Path: api/context.py
class ImageChatManger:
def __init__(self, prompt, file_id: str) -> None:
self.prompt = prompt
self.file_id = file_id
def tel_photo_url(self) -> str:
"""process telegram photo url"""
r_file_id = requests.get(
f"https://api.telegram.org/bot{BOT_TOKEN}/getFile?file_id={self.file_id}"
)
file_path = r_file_id.json().get("result").get("file_path")
download_url = f"https://api.telegram.org/file/bot{BOT_TOKEN}/{file_path}"
return download_url
def photo_bytes(self) -> BytesIO:
"""get photo bytes"""
photo_url = self.tel_photo_url()
response = requests.get(photo_url)
photo_bytes = BytesIO(response.content)
return photo_bytes
def send_image(self) -> str:
response = generate_text_with_image(self.prompt, self.photo_bytes())
return response
# Path: api/telegram.py
class Update:
def __init__(self, update: Dict) -> None:
self.update = update
self.from_id = update["message"]["from"]["id"]
self.type = self._type()
self.text = self._text()
self.photo_caption = self._photo_caption()
self.file_id = self._file_id()
self.user_name = update["message"]["from"]["username"]
def _type(self):
if "text" in self.update["message"]:
return "text"
elif "photo" in self.update["message"]:
return "photo"
else:
return ""
def _photo_caption(self):
if self.type == "photo":
return self.update["message"].get("caption", "describe the photo")
return ""
def _text(self):
if self.type == "text":
return self.update["message"]["text"]
return ""
def _file_id(self):
if self.type == "photo":
return self.update["message"]["photo"][0]["file_id"]
return ""
# Path: api/telegram.py
def send_message(chat_id, text):
"""send text message"""
payload = {
"chat_id": chat_id,
"text": escape(text),
"parse_mode": "MarkdownV2",
}
r = requests.post(f"{TELEGRAM_API}/sendMessage", data=payload)
print(f"Sent message: {text} to {chat_id}")
return r
# Path: api/handle.py
from .auth import is_authorized
from .context import ChatManager, ImageChatManger
from .telegram import Update, send_message
"""
All the chat that comes through the Telegram bot gets passed to the
handle_message function. This function checks out if the user has the
green light to chat with the bot. Once that's sorted, it figures out if
the user sent words or an image and deals with it accordingly.
For text messages, it fires up the ChatManager class that keeps track of
the back-and-forth with that user.
As for images, in Gemini pro, they're context-free, so you can handle
them pretty straight-up without much fuss.
"""
chat_manager = ChatManager()
def handle_message(update_data):
update = Update(update_data)
authorized = is_authorized(update.from_id, update.user_name)
if not authorized:
| send_message(update.from_id, "😫 You are not allowed to use this bot.") |
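
The completed line is a classic guard clause: reject unauthorized senders before any handler logic runs. Stripped to its skeleton (toy names, no Telegram objects):

ALLOWED_USERS = {"alice"}

def handle(user_name, from_id, text, send):
    if user_name not in ALLOWED_USERS:
        send(from_id, "You are not allowed to use this bot.")
        return
    send(from_id, f"echo: {text}")

handle("bob", 42, "hi", lambda chat_id, msg: print(chat_id, msg))
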
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: usail-hkust/LLMTSCS
# Path: utils/utils.py
def oneline_wrapper(dic_agent_conf, dic_traffic_env_conf, dic_path, roadnet, trafficflow):
results_table = []
all_rewards = []
all_queue_len = []
all_travel_time = []
for i in range(1):
dic_path["PATH_TO_MODEL"] = (dic_path["PATH_TO_MODEL"].split(".")[0] + ".json" +
time.strftime('%m_%d_%H_%M_%S', time.localtime(time.time())))
dic_path["PATH_TO_WORK_DIRECTORY"] = (dic_path["PATH_TO_WORK_DIRECTORY"].split(".")[0] + ".json" +
time.strftime('%m_%d_%H_%M_%S', time.localtime(time.time())))
oneline = OneLine(dic_agent_conf=dic_agent_conf,
dic_traffic_env_conf=merge(config.dic_traffic_env_conf, dic_traffic_env_conf),
dic_path=merge(config.DIC_PATH, dic_path),
roadnet=roadnet,
trafficflow=trafficflow
)
round_results = oneline.train(round=i)
results_table.append([round_results['test_reward_over'], round_results['test_avg_queue_len_over'],
round_results['test_avg_travel_time_over']])
all_rewards.append(round_results['test_reward_over'])
all_queue_len.append(round_results['test_avg_queue_len_over'])
all_travel_time.append(round_results['test_avg_travel_time_over'])
# delete junk
cmd_delete_model = 'rm -rf <dir>'.replace("<dir>", dic_path["PATH_TO_MODEL"])
cmd_delete_work = 'find <dir> -type f ! -name "state_action.json" -exec rm -rf {} \;'.replace("<dir>", dic_path["PATH_TO_WORK_DIRECTORY"])
os.system(cmd_delete_model)
os.system(cmd_delete_work)
results_table.append([np.average(all_rewards), np.average(all_queue_len), np.average(all_travel_time)])
results_table.append([np.std(all_rewards), np.std(all_queue_len), np.std(all_travel_time)])
table_logger = wandb.init(
project=dic_traffic_env_conf['PROJECT_NAME'],
group=f"{dic_traffic_env_conf['MODEL_NAME']}-{roadnet}-{trafficflow}-{len(dic_agent_conf['FIXED_TIME'])}_Phases",
name="exp_results",
config=merge(merge(dic_agent_conf, dic_path), dic_traffic_env_conf),
)
columns = ["reward", "avg_queue_len", "avg_travel_time"]
logger_table = wandb.Table(columns=columns, data=results_table)
table_logger.log({"results": logger_table})
wandb.finish()
return
# Path: utils/error.py
class flowFileException(Exception):
def __init__(self, message):
def __str__(self):
# Path: run_advanced_maxpressure.py
from utils.utils import oneline_wrapper
from utils import error
from multiprocessing import Process
import os
import time
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--memo", type=str, default='AdvancedMaxPressure')
parser.add_argument("--model", type=str, default="AdvancedMaxPressure")
parser.add_argument("--proj_name", type=str, default="chatgpt-TSCS")
parser.add_argument("--eightphase", action="store_true", default=False)
parser.add_argument("--multi_process", action="store_true", default=True)
parser.add_argument("--workers", type=int, default=1)
parser.add_argument("--dataset", type=str, default="template")
parser.add_argument("--traffic_file", type=str, default="flow_main_stream.json")
return parser.parse_args()
def main(in_args):
traffic_file_list = []
if in_args.dataset == 'jinan':
count = 3600
road_net = "3_4"
traffic_file_list = ["anon_3_4_jinan_real.json", "anon_3_4_jinan_real_2000.json", "anon_3_4_jinan_real_2500.json"]
template = "Jinan"
elif in_args.dataset == 'hangzhou':
count = 3600
road_net = "4_4"
traffic_file_list = ["anon_4_4_hangzhou_real.json", "anon_4_4_hangzhou_real_5816.json"]
template = "Hangzhou"
elif in_args.dataset == 'newyork_16x3':
count = 3600
road_net = "16_3"
traffic_file_list = ["anon_16_3_newyork_real.json"]
template = "NewYork"
elif in_args.dataset == 'newyork_28x7':
count = 3600
road_net = "28_7"
traffic_file_list = ["anon_28_7_newyork_real_double.json", "anon_28_7_newyork_real_triple.json"]
template = "NewYork"
elif in_args.dataset == 'template':
count = 3600
road_net = "1_1"
traffic_file_list = ["flow_main_stream.json"]
template = "template"
# flow_file error
try:
if in_args.traffic_file not in traffic_file_list:
| raise error.flowFileException('Flow file does not exist.') |
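
The raised flowFileException follows the plain message-carrying exception pattern; the error.py excerpt above shows its __init__/__str__ signatures but elides the bodies, so this self-contained equivalent fills them with the obvious assumption:

class FlowFileException(Exception):
    def __init__(self, message):
        self.message = message

    def __str__(self):
        return self.message

try:
    raise FlowFileException("Flow file does not exist.")
except FlowFileException as exc:
    print(exc)  # -> Flow file does not exist.
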
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ohadmata/shmessy
# Path: src/shmessy/schema.py
class InferredField(BaseModel):
inferred_type: Optional[str] = None
inferred_pattern: Optional[Any] = None
# Path: src/shmessy/schema.py
class ValidatorTypes(str, Enum):
NUMERIC = "NUMERIC"
STRING = "STRING"
# Path: src/shmessy/types/base.py
class BaseType(ABC):
weight: int = 0
validator_types: Tuple[ValidatorTypes]
@abstractmethod
def validate(self, data: ndarray) -> Optional[InferredField]:
pass
@abstractmethod
def fix(self, column: Series, inferred_field: InferredField) -> Series:
pass
def is_validator_type_valid(self, dtype: Type) -> bool:
for possible_validator_type in self.validator_types:
if self._check_single_validator_type(dtype, possible_validator_type):
return True
return False
@staticmethod
def _check_single_validator_type(
dtype: Type, possible_validator_type: ValidatorTypes
) -> bool:
if possible_validator_type == ValidatorTypes.NUMERIC and not issubdtype(
dtype, number
):
return False
if possible_validator_type == ValidatorTypes.STRING and not (
issubdtype(dtype, object_) or issubdtype(dtype, str_)
):
return False
return True
@property
def name(self) -> str:
return str(self.__class__.__name__.replace("Type", ""))
# Path: src/shmessy/types/unix_timestamp.py
import logging
import math
from datetime import datetime
from enum import Enum
from typing import Optional
from numpy import ndarray
from pandas import Series, to_datetime
from ..schema import InferredField, ValidatorTypes
from .base import BaseType
logger = logging.getLogger(__name__)
class TimestampResolution(str, Enum):
SECONDS = "s"
MILLISECONDS = "ms"
NANOSECONDS = "ns"
class UnixTimestampType(BaseType):
weight = 4
validator_types = (ValidatorTypes.NUMERIC,)
min_valid_year: int = 1980
max_valid_year: int = 2100
@staticmethod
def _unix_timestamp_resolution(value: float) -> TimestampResolution:
number_of_digits = len(str(int(value)))
if number_of_digits == 10:
return TimestampResolution.SECONDS
if number_of_digits == 13:
return TimestampResolution.MILLISECONDS
if number_of_digits == 16:
return TimestampResolution.NANOSECONDS
@staticmethod
def _fix_input_resolution(
value: float, selected_resolution: TimestampResolution
) -> float:
if selected_resolution == TimestampResolution.SECONDS:
return value
if selected_resolution == TimestampResolution.MILLISECONDS:
return value / 1000
if selected_resolution == TimestampResolution.NANOSECONDS:
return value / 1000 / 1000
| def validate(self, data: ndarray) -> Optional[InferredField]: |
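
A quick arithmetic check of the resolution logic above: ten digits mean seconds and thirteen mean milliseconds, so a millisecond value divided by 1000 lands inside the valid 1980-2100 year window.

from datetime import datetime, timezone

ms_value = 1700000000000                 # 13 digits -> milliseconds
seconds = ms_value / 1000                # what _fix_input_resolution returns
year = datetime.fromtimestamp(seconds, tz=timezone.utc).year
print(len(str(int(ms_value))), year)     # -> 13 2023
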
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: kokiez/solana-sniper
# Path: birdeye.py
def get_price(token_address):
url = f"https://api.dexscreener.com/latest/dex/tokens/{token_address}"
exclude = ['EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v', 'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB']
response = requests.get(url).json()
if token_address not in exclude:
for pair in response['pairs']:
if pair['quoteToken']['address'] == 'So11111111111111111111111111111111111111112':
return float(pair['priceUsd'])
else:
return response['pairs'][0]['priceUsd']
return None
# Path: birdeye.py
def getSymbol(token):
# usdc and usdt
exclude = ['EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v', 'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB']
if token not in exclude:
url = f"https://api.dexscreener.com/latest/dex/tokens/{token}"
Token_Symbol = ""
Sol_symbol=""
try:
response = requests.get(url)
# Check if the request was successful (status code 200)
if response.status_code == 200:
resp = response.json()
print("Response:",resp['pairs'][0]['baseToken']['symbol'])
for pair in resp['pairs']:
quoteToken = pair['quoteToken']['symbol']
if quoteToken == 'SOL':
Token_Symbol = pair['baseToken']['symbol']
Sol_symbol = quoteToken
return Token_Symbol, Sol_symbol
else:
print(f"[getSymbol] Request failed with status code {response.status_code}")
except requests.exceptions.RequestException as e:
print(f"[getSymbol] error occurred: {e}")
except:
a = 1
return Token_Symbol, Sol_symbol
else:
if token == 'EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v':
return "USDC", "SOL"
elif token == 'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB':
return "USDT", "SOL"
# Path: webhook.py
def sendWebhook(title_type_info, description):
global error_webhook
global webhook_url
title = ""
title_type = title_type_info.split("|")
if title_type[0] == "msg":
title = title_type[1]
color = colors["Green"]
webhook(title, color, description, webhook_url)
elif title_type[0] == "msg_b":
title = title_type[1]
color = colors["DarkAqua"]
webhook(title, color, description, webhook_url)
elif title_type[0] == "msg_s":
title = title_type[1]
color = colors["DarkAqua"]
webhook(title, color, description, webhook_url)
elif title_type[0] == "i_s": #invest or slippage was changed etc
title = title_type[1]
color = colors["DarkPurple"]
webhook(title, color, description, webhook_url)
elif title_type[0] == "e": #error
title = title_type[1]
color = colors["DarkRed"]
webhook(title, color, description, error_webhook)
elif title_type[0] == "a": #alert
title = title_type[1]
color = colors["LuminousVividPink"]
webhook(title, color, description, webhook_url)
elif title_type[0] == "w": #wallet info
title = title_type[1]
color = colors["Gold"]
webhook(title, color, description, webhook_url)
# Path: monitor_price_strategy.py
import time
from birdeye import get_price, getSymbol
from webhook import sendWebhook
"""If you have ton of trades then best to use Simulate Transaction and modify this part of code to your needs"""
"""
Only Take Profit
"""
def limit_order(bought_token_price,desired_token_address, take_profit_ratio, execution_time, txB):
token_symbol, SOl_Symbol = getSymbol(desired_token_address)
# CALCULATE SELL LIMIT
sell_limit_token_price = bought_token_price * take_profit_ratio
print("-" * 79)
print(f"| {'Bought Price':<12} | {'Sell Limit':<12} | {'Tx Buy':<50} |")
print("-" * 79)
print(f"|{bought_token_price:.12f} | {sell_limit_token_price:.12f} {txB:<50} |")
print("-" * 79)
sendWebhook(f"msg_b|BUY INFO {token_symbol}",f"Bought Price: {bought_token_price:.12f}\n**Sell Limit: {sell_limit_token_price:.15f}**\nTotal Buy Execution time: {execution_time} seconds\nBuy TXN: https://solscan.io/tx/{txB} |")
# LOOP = CHECK IF PRICE >= SELL LIMIT | checks price every 5 seconds
priceLow = True
# while priceLow and isTimePassed(time_limit) == False:
while priceLow:
# Check if time limit has been passed for the token bought or not
| bought_token_curr_price = get_price(desired_token_address)
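
Reduced to its skeleton, the loop being entered here polls the current price until it crosses the sell limit (a toy price feed stands in for get_price, and the real code sleeps between checks):

prices = iter([0.8, 0.9, 1.3])             # stand-in for get_price(...)
bought_token_price, take_profit_ratio = 1.0, 1.2
sell_limit = bought_token_price * take_profit_ratio

for current_price in prices:
    if current_price >= sell_limit:
        print(f"sell at {current_price}")  # the real code fires the sell here
        break
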
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: enochyearn/MLX_RoBERTa
# Path: custom/nn/layers/normalization.py
class LayerNormBasselCorrected(Module):
r"""Applies layer normalization [1] on the inputs with Bessel's Correction used by default like PyTorch.
Computes
.. math::
y = \frac{x - E[x]}{\sqrt{Var[x] + \epsilon}} \gamma + \beta,
where :math:`\gamma` and :math:`\beta` are learned per feature dimension
parameters initialized at 1 and 0 respectively.
By default, Var[x] is computed with Bessel's correction applied.
[1]: https://arxiv.org/abs/1607.06450
Args:
dims (int): The feature dimension of the input to normalize over
eps (float): A small additive constant for numerical stability
affine (bool): If True learn an affine transform to apply after the
normalization
correction (bool):
"""
def __init__(self, dims: int, eps: float = 1e-5, affine: bool = True, correction: bool = True):
super().__init__()
if affine:
self.bias = mx.zeros((dims,))
self.weight = mx.ones((dims,))
self.eps = eps
self.dims = dims
self.correction = correction
def _extra_repr(self):
return f"{self.dims}, eps={self.eps}, affine={'weight' in self}"
def __call__(self, x):
means = mx.mean(x, axis=-1, keepdims=True)
var = mx.var(x, axis=-1, keepdims=True, ddof=int(self.correction))
x = (x - means) * mx.rsqrt(var + self.eps)
return (self.weight * x + self.bias) if "weight" in self else x
# Path: custom/nn/layers/normalization.py
class LayerNormTorchAlike(Module):
r"""Applies layer normalization [1] on the inputs in PyTorch's style.
MLX's official LayerNorm has a different behavior with PyTorch's.
Computes
.. math::
y = \frac{x - E[x]}{\sqrt{Var[x] + \epsilon}} \gamma + \beta,
where :math:`\gamma` and :math:`\beta` are learned per feature dimension
parameters initialized at 1 and 0 respectively.
Note: here the variance is computed as E[X^2] - E[X]^2, i.e. the biased estimate without Bessel's correction, matching PyTorch.
[1]: https://arxiv.org/abs/1607.06450
Args:
dims (int): The feature dimension of the input to normalize over
eps (float): A small additive constant for numerical stability
affine (bool): If True learn an affine transform to apply after the
normalization
correction (bool): Stored for API parity with LayerNormBasselCorrected; not used in the computation.
"""
def __init__(self, dims: int, eps: float = 1e-5, affine: bool = True, correction: bool = True):
super().__init__()
if affine:
self.bias = mx.zeros((dims,))
self.weight = mx.ones((dims,))
self.eps = eps
self.dims = dims
self.correction = correction
def _extra_repr(self):
return f"{self.dims}, eps={self.eps}, affine={'weight' in self}"
def __call__(self, x):
# Calculate the mean of all elements;
# i.e. the means for each element $\mathbb{E}[X]$
mean = x.mean(axis=-1, keepdims=True)
# Calculate the squared mean of all elements;
# i.e. the means for each element $\mathbb{E}[X^2]$
mean_x2 = (x ** 2).mean(axis=-1, keepdims=True)
# Variance of all element $Var[X] = \mathbb{E}[X^2] - \mathbb{E}[X]^2$
var = mean_x2 - mean ** 2
# Normalize $$\hat{X} = \frac{X - \mathbb{E}[X]}{\sqrt{Var[X] + \epsilon}}$$
x_norm = (x - mean) / mx.sqrt(var + self.eps)
# Scale and shift $$\text{LN}(x) = \gamma \hat{X} + \beta$$
x_norm = self.weight * x_norm + self.bias
return x_norm
# Path: mlx_roberta.py
import argparse
import time
import mlx.core as mx
import mlx.nn as nn
import numpy as np
import math
from mlx.utils import tree_unflatten
from collections import OrderedDict
from custom.nn.layers.normalization import LayerNormBasselCorrected, LayerNormTorchAlike
from transformers import RobertaTokenizer
from dataclasses import dataclass
# utils
@dataclass
class ModelConfig:
intermediate_size: int = 3072
hidden_size: int = 768
no_heads: int = 12
hidden_layers: int = 12
vocab_size: int = 50265
attention_probs_dropout_prob: float = 0.1
hidden_dropout_prob: float = 0.1
layer_norm_eps: float = 1e-5
max_position_embeddings: int = 514
# QA model's parameters
num_labels: int = 2
type_vocab_size: int = 2
pad_token_id: int = 1
chunk_size_feed_forward: int = 0
model_configs = {
"deepset/roberta-base-squad2": ModelConfig(),
"roberta-base": ModelConfig(),
}
model_types = {
"deepset/roberta-base-squad2": "qa",
"roberta-base": "base",
}
class RobertaEmbeddings(nn.Module):
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
| self.LayerNorm = LayerNormTorchAlike(config.hidden_size, eps=config.layer_norm_eps, correction=True) |
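
A numeric sanity check of the LayerNorm formula from the docstrings above (biased variance, epsilon inside the square root), using plain numpy:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
mean, var, eps = x.mean(), x.var(), 1e-5      # np.var is the biased estimate
y = (x - mean) / np.sqrt(var + eps)
print(y.round(3), round(float(y.mean()), 3))  # standardized, mean ~ 0
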
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zy7y/dfs-generate
# Path: entity.py
class CodeGen(BaseVo):
name: str
code: str
@field_serializer("code")
def serialize_code(self, code: str, _info):
_code = black.format_str(code, mode=black.FileMode())
return isort.code(_code)
# Path: entity.py
class Conf(SQLModel, table=True):
__tablename__ = "dfs_conf"
id: int = Field(None, primary_key=True)
db_uri: str = Field(..., description="Database connection")
@classmethod
def get_db_uri_last_new(cls):
"""获取最新的db_url"""
with Session(engine) as session:
query = select(cls).order_by(cls.id.desc())
latest_conf = session.exec(query).first()
if latest_conf:
return latest_conf.db_uri
else:
return None
@classmethod
def create(cls, uri) -> "Conf":
with Session(engine) as session:
obj = cls(db_uri=uri)
session.add(obj)
session.commit()
session.refresh(obj)
return obj
@classmethod
def get_last_uri_with_metadata(cls):
uri = cls.get_db_uri_last_new()
return uri, get_metadata_by_db_uri(uri)
# Path: entity.py
class DBConf(SQLModel):
user: str
password: str
port: int
host: str
db: str
def get_db_uri(self):
return f"mysql+pymysql://{self.user}:{self.password}@{self.host}:{self.port}/{self.db}"
def get_metadata(self):
return get_metadata_by_db_uri(self.get_db_uri())
# Path: entity.py
class R(BaseModel, Generic[T]):
code: int = 20000
msg: str = "ok"
data: Optional[T] = None
@classmethod
def success(cls, **kwargs):
return cls(**kwargs)
@classmethod
def error(cls, msg):
return cls(code=40000, msg=msg)
# Path: entity.py
class RList(R[T]):
data: List[T] = Field(default_factory=list)
# Path: entity.py
class Table(BaseVo):
table_name: str
table_comment: Optional[str] = None
# Path: generate/main.py
def generate_code(table: Table, uri: str):
return [
{"name": "model.py", "code": GenerateEntity(table).render()},
{"name": "router.py", "code": render_router(table.name)},
{"name": "main.py", "code": render_main(table.name)},
{"name": "db.py", "code": render_db(uri)},
]
# Path: main.py
from fastapi import FastAPI, Query
from fastapi.requests import Request
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles
from entity import CodeGen, Conf, DBConf, R, RList, Table
from generate.main import generate_code
import uvicorn
app = FastAPI(
title="dfs-generate", description="FastAPI SQLModel 逆向生成代码", docs_url=None
)
app.mount("/static", StaticFiles(directory="static"), name="static")
@app.get("/", include_in_schema=False)
def index():
return FileResponse("static/index.html")
| @app.get("/tables", response_model=RList[Table]) |
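
The route being completed relies on the generic response envelope defined in entity.py above. A minimal runnable sketch of that pattern (assuming pydantic v2, which accepts Generic[T] directly on BaseModel):

from typing import Generic, List, TypeVar
from fastapi import FastAPI
from pydantic import BaseModel, Field

T = TypeVar("T")

class RList(BaseModel, Generic[T]):
    code: int = 20000
    msg: str = "ok"
    data: List[T] = Field(default_factory=list)

app = FastAPI()

@app.get("/pings", response_model=RList[str])
def pings() -> RList[str]:
    return RList(data=["pong"])
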
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: CrawlScript/Torch-MGDCF
# Path: torch_mgdcf/metrics/ranking.py
def ndcg_score(reference, hypothesis):
"""
Normalized Discounted Cumulative Gain (nDCG)
Normalized version of DCG:
nDCG = DCG(hypothesis)/DCG(reference)
Parameters:
reference - a gold standard (perfect) ordering Ex: [5,4,3,2,1]
hypothesis - a proposed ordering Ex: [5,2,2,3,1]
Returns:
ndcg_score - normalized score
"""
return dcg_score(hypothesis)/dcg_score(reference)
# Path: torch_mgdcf/metrics/ranking.py
def precision_score(reference, hypothesis):
result = np.sum(hypothesis, dtype=np.float32)/len(hypothesis)
return result
# Path: torch_mgdcf/metrics/ranking.py
def recall_score(reference, hypothesis):
result = np.sum(hypothesis, dtype=np.float32) / len(reference)
return result
# Path: torch_mgdcf/vector_search/vector_search.py
class VectorSearchEngine(object):
def __init__(self, vectors):
super().__init__()
if isinstance(vectors, torch.Tensor):
self.vectors = vectors.detach().cpu().numpy()
else:
self.vectors = np.array(vectors)
self.dim = self.vectors.shape[1]
self.index = faiss.IndexFlatIP(self.dim)
self.index.add(self.vectors)
def search(self, query_vectors, k=10):
query_vectors = np.asarray(query_vectors)
topK_distances, topK_indices = self.index.search(query_vectors, k)
return topK_distances, topK_indices
# Path: torch_mgdcf/evaluation/ranking.py
from tqdm import tqdm
from torch_mgdcf.metrics.ranking import ndcg_score, precision_score, recall_score
from torch_mgdcf.vector_search.vector_search import VectorSearchEngine
import numpy as np
import torch
# coding=utf-8
# This code comes from another project of ours, GRecX: https://github.com/maenzhier/grecx_datasets
def score(ground_truth, pred_items, k_list, metrics):
pred_match = [1 if item in ground_truth else 0 for item in pred_items]
max_k = k_list[-1]
if len(ground_truth) > max_k:
ndcg_gold = [1] * max_k
else:
ndcg_gold = [1] * len(ground_truth) + [0] * (max_k - len(ground_truth))
res_score = []
for metric in metrics:
if metric == "ndcg":
score_func = ndcg_score
elif metric == "precision":
score_func = precision_score
elif metric == "recall":
score_func = recall_score
else:
raise Exception("Not Found Metric : {}".format(metric))
for k in k_list:
if metric == "ndcg":
res_score.append(score_func(ndcg_gold[:k], pred_match[:k]))
else:
res_score.append(score_func(ground_truth, pred_match[:k]))
return res_score
def evaluate_mean_global_metrics(user_items_dict, user_mask_items_dict,
user_embedding, item_embedding,
k_list=[10, 20], metrics=["ndcg"]):
| v_search = VectorSearchEngine(item_embedding) |
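
VectorSearchEngine above is a thin wrapper over faiss's flat inner-product index, and the completed line simply instantiates it over the item embeddings. The core faiss calls in isolation:

import numpy as np
import faiss

item_embedding = np.random.rand(100, 8).astype("float32")
index = faiss.IndexFlatIP(item_embedding.shape[1])        # inner-product index
index.add(item_embedding)
distances, indices = index.search(item_embedding[:2], 5)  # top-5 per query
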
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: KyanChen/TTP
# Path: mmseg/utils/typing_utils.py
# Path: opencd/registry.py
MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['opencd.models'])
# Path: opencd/models/data_preprocessor.py
from numbers import Number
from typing import Any, Dict, List, Optional, Sequence, Union
from mmengine.model import BaseDataPreprocessor
from mmseg.utils import SampleList
from opencd.registry import MODELS
import numpy as np
import torch
import torch.nn.functional as F
# Copyright (c) Open-CD. All rights reserved.
def stack_batch(inputs: List[torch.Tensor],
data_samples: Optional[SampleList] = None,
size: Optional[tuple] = None,
size_divisor: Optional[int] = None,
pad_val: Union[int, float] = 0,
seg_pad_val: Union[int, float] = 255) -> torch.Tensor:
"""Stack multiple inputs to form a batch and pad the images and gt_sem_segs
to the max shape use the right bottom padding mode.
Args:
inputs (List[Tensor]): The input multiple tensors. each is a
CHW 3D-tensor.
data_samples (list[:obj:`SegDataSample`]): The list of data samples.
It usually includes information such as `gt_sem_seg`.
size (tuple, optional): Fixed padding size.
size_divisor (int, optional): The divisor of padded size.
pad_val (int, float): The padding value. Defaults to 0
seg_pad_val (int, float): The padding value. Defaults to 255
Returns:
Tensor: The 4D-tensor.
List[:obj:`SegDataSample`]: After the padding of the gt_seg_map.
"""
assert isinstance(inputs, list), \
f'Expected input type to be list, but got {type(inputs)}'
assert len({tensor.ndim for tensor in inputs}) == 1, \
f'Expected the dimensions of all inputs must be the same, ' \
f'but got {[tensor.ndim for tensor in inputs]}'
assert inputs[0].ndim == 3, f'Expected tensor dimension to be 3, ' \
f'but got {inputs[0].ndim}'
assert len({tensor.shape[0] for tensor in inputs}) == 1, \
f'Expected the channels of all inputs must be the same, ' \
f'but got {[tensor.shape[0] for tensor in inputs]}'
# only one of size and size_divisor should be valid
assert (size is not None) ^ (size_divisor is not None), \
'only one of size and size_divisor should be valid'
padded_inputs = []
padded_samples = []
inputs_sizes = [(img.shape[-2], img.shape[-1]) for img in inputs]
max_size = np.stack(inputs_sizes).max(0)
if size_divisor is not None and size_divisor > 1:
# the last two dims are H,W, both subject to divisibility requirement
max_size = (max_size +
(size_divisor - 1)) // size_divisor * size_divisor
for i in range(len(inputs)):
tensor = inputs[i]
if size is not None:
width = max(size[-1] - tensor.shape[-1], 0)
height = max(size[-2] - tensor.shape[-2], 0)
# (padding_left, padding_right, padding_top, padding_bottom)
padding_size = (0, width, 0, height)
elif size_divisor is not None:
width = max(max_size[-1] - tensor.shape[-1], 0)
height = max(max_size[-2] - tensor.shape[-2], 0)
padding_size = (0, width, 0, height)
else:
padding_size = [0, 0, 0, 0]
# pad img
pad_img = F.pad(tensor, padding_size, value=pad_val)
padded_inputs.append(pad_img)
# pad gt_sem_seg
if data_samples is not None:
data_sample = data_samples[i]
gt_sem_seg = data_sample.gt_sem_seg.data
del data_sample.gt_sem_seg.data
data_sample.gt_sem_seg.data = F.pad(
gt_sem_seg, padding_size, value=seg_pad_val)
if 'gt_edge_map' in data_sample:
gt_edge_map = data_sample.gt_edge_map.data
del data_sample.gt_edge_map.data
data_sample.gt_edge_map.data = F.pad(
gt_edge_map, padding_size, value=seg_pad_val)
if 'gt_seg_map_from' in data_sample:
gt_seg_map_from = data_sample.gt_seg_map_from.data
del data_sample.gt_seg_map_from.data
data_sample.gt_seg_map_from.data = F.pad(
gt_seg_map_from, padding_size, value=seg_pad_val)
if 'gt_seg_map_to' in data_sample:
gt_seg_map_to = data_sample.gt_seg_map_to.data
del data_sample.gt_seg_map_to.data
data_sample.gt_seg_map_to.data = F.pad(
gt_seg_map_to, padding_size, value=seg_pad_val)
data_sample.set_metainfo({
'img_shape': tensor.shape[-2:],
'pad_shape': data_sample.gt_sem_seg.shape,
'padding_size': padding_size
})
padded_samples.append(data_sample)
else:
padded_samples.append(
dict(
img_padding_size=padding_size,
pad_shape=pad_img.shape[-2:]))
return torch.stack(padded_inputs, dim=0), padded_samples
| @MODELS.register_module() |
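
The completed line applies mmengine's registry decorator. A toy dictionary-backed equivalent shows the shape; the decorated class name below is hypothetical, since the snippet cuts off before the class it registers:

class ToyRegistry(dict):
    def register_module(self):
        def wrap(cls):
            self[cls.__name__] = cls
            return cls
        return wrap

MODELS = ToyRegistry()

@MODELS.register_module()
class DualInputDataPreProcessor:  # hypothetical name for illustration
    pass

print("DualInputDataPreProcessor" in MODELS)  # -> True
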
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: N0rz3/Phunter
# Path: lib/free_lookup.py
async def free(phone_number):
r = await Request("https://free-lookup.net/{}".format(phone_number), headers={'user-agent': random.choice(agent)}).get()
html_body = BeautifulSoup(r.text, "html.parser")
list_info = html_body.findChild("ul", class_="report-summary__list").findAll("div")
info_dict = {
k.text.strip(): info.text.strip() if info.text.strip() else "Not found"
for _, (k, info) in enumerate(zip(list_info[::2], list_info[1::2]))
}
print(f"\n [{GREEN}>{WHITE}] Free-lookup")
for key, value in info_dict.items():
if value != "Not found":
print(f" ├── {key}: {value}")
else:
continue
# Path: lib/spam.py
async def spamcalls(p_n):
print(f"\n [{GREEN}>{WHITE}] Spamcalls")
url = f"https://spamcalls.net/en/number/{p_n}"
r = await Request(url, headers={'user-agent': random.choice(user_agent)}).get()
if r.status_code == 200:
print(f" └── {RED}!{WHITE} Spammer")
else:
print(f" └── {GREEN}>{WHITE} Not spammer")
# Path: lib/lookup.py
import phonenumbers
import json
from phonenumbers import carrier
from .reputation import *
from .free_lookup import free
from .spam import spamcalls
from lib.text import *
async def lookup(phone_number):
print()
parsed = phonenumbers.parse(phone_number)
operator = carrier.name_for_number(parsed, "fr")
line = phonenumbers.number_type(parsed)
if line == phonenumbers.PhoneNumberType.FIXED_LINE:
ligne = f" [{GREEN}>{WHITE}] Line type: Fixed"
elif line == phonenumbers.PhoneNumberType.MOBILE:
ligne = f" [{GREEN}>{WHITE}] Line type: Mobile"
else:
ligne = " [-] Line not found"
possible = phonenumbers.is_possible_number(parsed)
valid = phonenumbers.is_valid_number(parsed)
with open("lib/country.json", "r") as file:
read = json.load(file)
d = 0
countrys = []
for country, code in read.items():
d += 1
if phone_number.startswith(code):
countrys.append(country)
if d == 153:
break
else:
continue
else:
continue
print(f"{WHITE}📞 Phone number: {BLUE}{phone_number}{WHITE}")
if possible == True:
pos = {"possible": "✔️"}
else:
pos = {"possible": "❌"}
if valid == True:
val = {"valid": "✔️"}
else:
val = {"valid": "❌"}
print(f" [{GREEN}>{WHITE}] Possible: {pos['possible']}")
print(f" [{GREEN}>{WHITE}] Valid: {val['valid']}")
print()
if operator != "":
print(f" [{GREEN}>{WHITE}] Operator: {operator}")
else:
print(f" [-] Not Operator")
try:
print(f" [{GREEN}>{WHITE}] Possible location: " + str(countrys).replace("[", "").replace("]", "").replace("'", ""))
except:
print(f" [-] Not location")
print(ligne)
await reputation(phone_number)
| await free(str(phone_number).replace("+", ""))
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: dan-r/HomeAssistant-Ohme
# Path: custom_components/ohme/const.py
DOMAIN = "ohme"
# Path: custom_components/ohme/const.py
DATA_COORDINATORS = "coordinators"
# Path: custom_components/ohme/const.py
COORDINATOR_CHARGESESSIONS = 0
# Path: custom_components/ohme/const.py
COORDINATOR_ADVANCED = 3
# Path: custom_components/ohme/const.py
DATA_CLIENT = "client"
# Path: custom_components/ohme/coordinator.py
class OhmeChargeSessionsCoordinator(DataUpdateCoordinator):
"""Coordinator to pull main charge state and power/current draw."""
def __init__(self, hass):
"""Initialise coordinator."""
super().__init__(
hass,
_LOGGER,
name="Ohme Charge Sessions",
update_interval=timedelta(seconds=30),
)
self._client = hass.data[DOMAIN][DATA_CLIENT]
async def _async_update_data(self):
"""Fetch data from API endpoint."""
try:
return await self._client.async_get_charge_sessions()
except BaseException:
raise UpdateFailed("Error communicating with API")
# Path: custom_components/ohme/coordinator.py
class OhmeAdvancedSettingsCoordinator(DataUpdateCoordinator):
"""Coordinator to pull CT clamp reading."""
def __init__(self, hass):
"""Initialise coordinator."""
super().__init__(
hass,
_LOGGER,
name="Ohme Advanced Settings",
update_interval=timedelta(minutes=1),
)
self._client = hass.data[DOMAIN][DATA_CLIENT]
async def _async_update_data(self):
"""Fetch data from API endpoint."""
try:
return await self._client.async_get_advanced_settings()
except BaseException:
raise UpdateFailed("Error communicating with API")
# Path: custom_components/ohme/utils.py
def charge_graph_in_slot(charge_start, points, skip_format=False):
"""Are we currently in a charge slot?"""
now = int(time())
data = points if skip_format else _format_charge_graph(charge_start, points)
# Loop through every value, skipping the last
for idx in range(0, len(data) - 1):
# This is our current point
if data[idx]["t"] < now and data[idx + 1]["t"] > now:
# If the delta line we are on is steeper than 10,
# we are in a charge slot.
if data[idx + 1]["y"] - data[idx]["y"] > 10:
return True
break
return False
# Path: custom_components/ohme/binary_sensor.py
import logging
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity
)
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.util.dt import (utcnow)
from .const import DOMAIN, DATA_COORDINATORS, COORDINATOR_CHARGESESSIONS, COORDINATOR_ADVANCED, DATA_CLIENT
from .coordinator import OhmeChargeSessionsCoordinator, OhmeAdvancedSettingsCoordinator
from .utils import charge_graph_in_slot
"""Platform for sensor integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: core.HomeAssistant,
config_entry: config_entries.ConfigEntry,
async_add_entities,
):
"""Setup sensors and configure coordinator."""
client = hass.data[DOMAIN][DATA_CLIENT]
| coordinator = hass.data[DOMAIN][DATA_COORDINATORS][COORDINATOR_CHARGESESSIONS] |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Almas-Ali/SpyIP
# Path: spyip/exceptions.py
class TooManyRequests(Exception):
pass
# Path: spyip/exceptions.py
class ConnectionTimeout(Exception):
pass
# Path: spyip/exceptions.py
class StatusError(Exception):
pass
# Path: spyip/models.py
class IPResponse(BaseModel):
"""
Example response from API:
{
"status": "success",
"continent": "Asia",
"continentCode": "AS",
"country": "India",
"countryCode": "IN",
"region": "DL",
"regionName": "National Capital Territory of Delhi",
"city": "New Delhi",
"district": "",
"zip": "110001",
"lat": 28.6139,
"lon": 77.209,
"timezone": "Asia/Kolkata",
"offset": 19800,
"currency": "INR",
"isp": "Google LLC",
"org": "Google LLC",
"as": "AS15169 Google LLC",
"asname": "GOOGLE",
"mobile": false,
"proxy": false,
"hosting": true,
"query": "142.250.193.206",
}
"""
status: str = Field(..., description='Status of the request.')
continent: str = Field(..., description='Continent name.')
continentCode: str = Field(..., description='Continent code.')
country: str = Field(..., description='Country name.')
countryCode: str = Field(..., description='Country code.')
region: str = Field(..., description='Region code.')
regionName: str = Field(..., description='Region name.')
city: str = Field(..., description='City name.')
district: str = Field(..., description='District name.')
zip_: str = Field(..., description='Zip code.')
lat: float = Field(..., description='Latitude.')
lon: float = Field(..., description='Longitude.')
timezone: str = Field(..., description='Timezone.')
offset: int = Field(..., description='Offset.')
currency: str = Field(..., description='Currency.')
isp: str = Field(..., description='ISP name.')
org: str = Field(..., description='Organization name.')
as_: str = Field(..., description='AS number and name.')
asname: str = Field(..., description='AS name.')
mobile: bool = Field(..., description='Mobile status.')
proxy: bool = Field(..., description='Proxy status.')
hosting: bool = Field(..., description='Hosting status.')
query: str = Field(..., description='IP address.')
class Config:
def alias_generator(x):
return x.replace('_', '')
populate_by_name = True
# fields = { # Alias for reserved keywords
# "as_": "as",
# "zip_": "zip",
# }
@field_validator('status')
def check_status(cls, v):
if v != 'success':
raise ValueError('Invalid IP address.')
return v
def json(self, **kwargs) -> str:
return self.model_dump_json(**kwargs)
# Path: spyip/models.py
class DNSResponse(BaseModel):
"""
Example response from API:
"dns": {
"ip": "74.125.73.83",
"geo": "United States - Google"
}
"""
ip: str = Field(..., description='IP address.')
geo: str = Field(..., description='Geo location.')
def json(self, **kwargs) -> str:
return self.model_dump_json(**kwargs)
# Path: spyip/backend.py
from typing import List, Union
from .exceptions import (
TooManyRequests,
ConnectionTimeout,
StatusError,
)
from .models import (
IPResponse,
DNSResponse,
)
import asyncio
import random
import string
import httpx
def get_random_string(length: int = 32) -> str:
"""Generate a random string of fixed length."""
letters = string.ascii_lowercase + string.digits
return ''.join(random.sample(letters, length))
# API endpoints for IP address lookup
trace_me_url = 'http://ip-api.com/json/'
trace_ip_url = 'http://ip-api.com/json/%(query)s'
trace_dns_url = f'http://{get_random_string(32)}.edns.ip-api.com/json/'
trace_ip_batch_url = 'http://ip-api.com/batch'
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.5',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
}
def trace_me(
timeout: int = 5,
lang: str = 'en',
| ) -> Union[IPResponse, None]: |
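As an aside, the random-subdomain DNS endpoint defined above can be exercised directly. A minimal sketch, relying on the module-level trace_dns_url and headers from this file; the helper name fetch_dns_info is invented, not part of the SDK.

import httpx

def fetch_dns_info(timeout: int = 5) -> dict:
    # The random edns subdomain defeats caching, which is what lets
    # ip-api report the resolver that actually answered the lookup.
    response = httpx.get(trace_dns_url, headers=headers, timeout=timeout)
    response.raise_for_status()  # surfaces 4xx/5xx as an exception
    return response.json()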
====REPOSITORY====
# Repo Name: leopedroso45/Stable-Diffusion-ImageGen
# Path: sevsd/process_task.py
def check_cuda_and_clear_cache():
r"""
Clears the CUDA cache if available, otherwise performs garbage collection.
This function is called to manage memory usage, particularly when working with large models or multiple image generations.
"""
if torch.cuda.is_available():
torch.cuda.empty_cache()
else:
gc.collect()
# Path: sevsd/process_task.py
def process_task(job, pipeline, executor, path, parallel_exec=True):
r"""
Processes a single image generation job using the specified pipeline and execution parameters.
This function handles the generation of one or more images based on a given job description. It supports both parallel and sequential execution modes. Generated images are saved to the specified path.
Parameters:
job (dict): A dictionary containing details for the image generation task. It includes 'prompt' and optionally 'negative_prompt'.
pipeline (callable): The Stable Diffusion pipeline callable used for generating images.
executor (dict): A dictionary containing execution parameters such as 'num_of_exec', 'cfg_scale', and 'inference_steps'.
path (str): The directory path where generated images will be saved.
parallel_exec (bool, optional): If True, generates all specified images in parallel. Defaults to True.
The function saves each generated image with a unique timestamp in the specified path and prints the save location. In case of any exceptions, they are caught and printed.
Example:
job = {
"prompt": "A scenic landscape",
"negative_prompt": "blurred image, black and white, watermarked image"
}
executor = {
"num_of_exec": 2,
"cfg_scale": 7,
"inference_steps": 50
}
pipeline = setup_pipeline("CompVis/stable-diffusion-v1-4")
process_task(job, pipeline, executor, "./generated-images", parallel_exec=False)
Note:
This function also handles CUDA cache clearing and garbage collection for memory management.
"""
def call_generate_image():
images = generate_image(job, pipeline, executor, parallel_exec)
if images is not None:
for image in images:
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S%f")
image_path = f"{path}/generated_image_{timestamp}.png"
image.save(image_path)
print(f"[sevsd] - image saved at {image_path}")
else:
print("[sevsd] - image generation failed due to memory constraints.")
check_cuda_and_clear_cache()
try:
path = check_os_path(path)
if job is not None:
if parallel_exec is not True:
num_images = executor.get("num_of_exec", 1)
for _ in range(num_images):
call_generate_image()
else:
call_generate_image()
except Exception as e:
print(f"[sevsd] - exception: {e}")
finally:
check_cuda_and_clear_cache()
# Path: sevsd/process_task.py
def check_os_path(path):
r"""
Checks if the given path exists, and if not, creates the necessary directories.
This function ensures that the output path for saving images is available.
Parameters:
path (str): The directory path to check and create if necessary.
Returns:
str: The verified or created directory path.
"""
if not os.path.exists(path):
os.makedirs(path)
print(f"[sevsd] - created path: {path}")
return path
# Path: tests/test_process_task.py
import unittest
import sys
from unittest.mock import patch, MagicMock
from sevsd.process_task import check_cuda_and_clear_cache, process_task, check_os_path
sys.path.append('../')
class TestProcessTask(unittest.TestCase):
@patch('sevsd.process_task.generate_image')
def test_process_task(self, mock_generate_image):
mock_image = MagicMock()
mock_image.save = MagicMock()
mock_generate_image.return_value = [mock_image]
fake_job = {"prompt": "prompt", "details": (None, 50, 1, 7.5)}
fake_pipeline = MagicMock()
fake_executor = {"num_of_exec": 1, "cfg_scale": 7}
fake_path = "test_path"
| process_task(fake_job, fake_pipeline, fake_executor, fake_path, parallel_exec=True) |
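The test's remaining assertions are elided here; a hedged sketch of what a mock-based test like this usually verifies (these exact checks are an assumption, not the original code):

# Hypothetical follow-up assertions on the mocks defined above:
mock_generate_image.assert_called_once()  # generation invoked exactly once
mock_image.save.assert_called_once()      # and the resulting image saved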
====REPOSITORY====
# Repo Name: Emperor-WS/PyEmber
# Path: ember/autograd/hook.py
class Hook:
"""
Hook class for attaching gradient functions to tensors.
Hooks allow users to attach custom gradient functions to tensors for
monitoring or modifying gradients during backpropagation.
Attributes:
- tensor (Tensor): The target tensor.
- grad_fn (callable): The gradient function to be applied to the tensor.
Methods:
- __init__(self, tensor, grad_fn): Constructor for Hook class.
- __repr__(self): String representation of the Hook instance.
"""
__slots__ = 'tensor', 'grad_fn'
def __init__(self, tensor, grad_fn):
"""
Constructor for the Hook class.
Args:
- tensor (Tensor): The target tensor.
- grad_fn (callable): The gradient function to be applied to the tensor.
"""
self.tensor = tensor
self.grad_fn = grad_fn
def __repr__(self):
"""
String representation of the Hook instance.
Returns:
- str: A string containing information about the tensor and its associated gradient function.
"""
# Extract the class name from the qualified name of the gradient function
grad_name = self.grad_fn.__qualname__.split('.')[0]
return f"Hook(tensor_id={self.tensor.id}, grad_fn={grad_name.upper()})"
# Path: ember/autograd/_utils.py
def numpy_unpad(x, pad_width):
"""
Remove padding from an array.
Args:
- x (numpy.ndarray): Input array.
- pad_width (tuple of ints): Amount of padding on each dimension.
Returns:
- numpy.ndarray: Unpadded array.
"""
slices = []
for pad in pad_width:
end = None if pad[1] == 0 else -pad[1]
slices.append(slice(pad[0], end))
return x[tuple(slices)]
# Path: ember/autograd/_utils.py
def inv_permutation(permutation):
"""
Compute the inverse of a permutation.
Args:
- permutation (list): List representing a permutation.
Returns:
- list: Inverse permutation.
"""
inverse = [0] * len(permutation)
for original_idx, permuted_idx in enumerate(permutation):
inverse[permuted_idx] = original_idx
return inverse
# Path: ember/autograd/numeric.py
import numpy as np
import ember
from .hook import Hook
from ._utils import numpy_unpad, inv_permutation
def _T(t):
"""
Transpose operation on the input tensor.
Args:
- t: Input tensor.
Returns:
- Tensor: Resultant tensor with the transpose operation applied.
"""
t = ember.to_tensor(t) # Convert the input tensor to a Tensor
data = t.data.T # Transpose operation
requires_grad = t.requires_grad # Set requires_grad based on input tensor
hooks = []
# Register a hook for gradient computation if the input tensor requires it
if requires_grad:
| hooks.append(Hook(t, lambda grad: grad.T)) |
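A standalone NumPy sketch of why the transpose hook can simply transpose the upstream gradient (illustrative only):

import numpy as np

x = np.arange(6, dtype=float).reshape(2, 3)
upstream = np.ones_like(x.T)  # gradient arriving w.r.t. x.T, shape (3, 2)
grad_x = upstream.T           # the hook routes it back to x's shape (2, 3)
assert grad_x.shape == x.shape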
====REPOSITORY====
# Repo Name: Hassi34/iot-device-identification
# Path: src/utils/common.py
def read_yaml(path_to_yaml: str) -> dict:
with open(path_to_yaml) as yaml_file:
content = yaml.safe_load(yaml_file)
return content
# Path: src/utils/sys_logging.py
def get_logger(logs_filepath: str):
logger.add(
logs_filepath,
format="{time} | {level} | {name}.{module}:{line} | {message}",
level="DEBUG",
rotation="10 KB",
retention="10 days",
compression="zip",
colorize=True,
enqueue=True,
catch=True,
encoding="utf-8",
)
return logger
# Path: src/utils/common.py
def write_dict_to_yaml(dict_input: dict, yaml_file_path: str):
try:
current_file_data = read_yaml(yaml_file_path)
current_file_data.update(dict_input)
with open(yaml_file_path, "w") as f:
yaml.dump(current_file_data, f)
except (FileNotFoundError, AttributeError):
with open(yaml_file_path, "w") as f:
yaml.dump(dict_input, f)
# Path: src/utils/data_ops.py
def gzip_np_arr(np_array: np.ndarray, filepath: str):
with gzip.GzipFile(filepath, "w") as f:
np.save(file=f, arr=np_array)
# Path: src/utils/data_ops.py
def get_fitted_pipeline(df, columns, KNN_IMPUTER_NEIGHBORS: int = 3):
ct = ColumnTransformer(
transformers=[("input_features", "passthrough", columns)], remainder="drop"
)
imputer = KNNImputer(n_neighbors=KNN_IMPUTER_NEIGHBORS)
scaler = StandardScaler()
pipeline = Pipeline(
steps=[("select_columns", ct), ("imputer", imputer), ("scaler", scaler)]
)
return pipeline.fit(df)
# Path: src/stage_03_preprocess_data.py
import argparse
import joblib
import pandas as pd
from src.utils.common import read_yaml
from src.utils.sys_logging import get_logger
from sklearn.preprocessing import LabelEncoder
from src.utils.common import write_dict_to_yaml
from src.utils.data_ops import gzip_np_arr
from sklearn.model_selection import train_test_split
from src.utils.data_ops import get_fitted_pipeline
from pathlib import Path
STAGE = "Preprocess Data"
def preprocess_data():
complete_df = pd.read_parquet(RAW_DATA_FILE_PATH)
logger.info(
f'The raw data file has been loaded from "{RAW_DATA_FILE_PATH}" with the shape "{complete_df.shape}"'
)
duplicate_rows = complete_df.duplicated().sum()
if duplicate_rows > 0:
logger.warning(
f"Found {duplicate_rows} duplicate rows, removing duplicate rows..."
)
complete_df = complete_df.drop_duplicates(keep="first")
X = complete_df.drop([TARGET_COLUMN_NAME], axis=1)
y = complete_df[TARGET_COLUMN_NAME]
feature_cols = params["input_features_schema"]
feature_cols = list(feature_cols.keys())
logger.info(f"Read {len(feature_cols)} feature columns from params")
data_processing_pipeline = get_fitted_pipeline(
X, feature_cols, KNN_IMPUTER_NEIGHBORS=KNN_IMPUTER_NEIGHBORS
)
Path(DATA_PREPROCESSING_PIPELINE_FILE_PATH).parent.absolute().mkdir(parents=True, exist_ok=True)
joblib.dump(data_processing_pipeline, DATA_PREPROCESSING_PIPELINE_FILE_PATH, compress=1)
logger.info(f"Saved the preprocessing pipeline to {DATA_PREPROCESSING_PIPELINE_FILE_PATH}")
data_processing_pipeline = joblib.load(DATA_PREPROCESSING_PIPELINE_FILE_PATH)
logger.info(
f'Loaded sklearn data preprocessing pipeline from "{DATA_PREPROCESSING_PIPELINE_FILE_PATH}"'
)
X_transformed = data_processing_pipeline.transform(X)
logger.info(f'Dataframe shape after transformation is "{X_transformed.shape}"')
le = LabelEncoder()
le.fit(y)
labels_mapping_dict = {"labels_mapping": ""}
le_dict = dict(zip(le.transform(le.classes_), le.classes_))
le_dict = {int(k): v for k, v in le_dict.items()}
labels_mapping_dict["labels_mapping"] = le_dict
logger.info(f"Label encoding map has the dictionary: {le_dict}")
write_dict_to_yaml(labels_mapping_dict, parsed_args.params)
logger.info(f'Updated the label encoding map in the file at "{parsed_args.params}"')
| labels_dict = read_yaml(parsed_args.params)["labels_mapping"] |
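For reference, the label-mapping idiom used above in a self-contained sketch (the class names are invented sample data):

from sklearn.preprocessing import LabelEncoder

le = LabelEncoder().fit(["camera", "bulb", "plug", "camera"])
mapping = {int(k): v for k, v in zip(le.transform(le.classes_), le.classes_)}
# -> {0: 'bulb', 1: 'camera', 2: 'plug'}; classes_ is sorted alphabetically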
====REPOSITORY====
# Repo Name: see2023/Bert-VITS2-ext
# Path: config.py
class Resample_config:
class Preprocess_text_config:
class Bert_gen_config:
class Emo_gen_config:
class Train_ms_config:
class Webui_config:
class Server_config:
class Translate_config:
class Config:
def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):
def from_dict(cls, dataset_path: str, data: Dict[str, any]):
def __init__(
self,
transcription_path: str,
cleaned_path: str,
train_path: str,
val_path: str,
config_path: str,
val_per_lang: int = 5,
max_val_total: int = 10000,
clean: bool = True,
):
def from_dict(cls, dataset_path: str, data: Dict[str, any]):
def __init__(
self,
config_path: str,
num_processes: int = 2,
device: str = "cuda",
use_multi_device: bool = False,
):
def from_dict(cls, dataset_path: str, data: Dict[str, any]):
def __init__(
self,
config_path: str,
num_processes: int = 2,
device: str = "cuda",
use_multi_device: bool = False,
):
def from_dict(cls, dataset_path: str, data: Dict[str, any]):
def __init__(
self,
config_path: str,
env: Dict[str, any],
base: Dict[str, any],
model: str,
num_workers: int,
spec_cache: bool,
keep_ckpts: int,
):
def from_dict(cls, dataset_path: str, data: Dict[str, any]):
def __init__(
self,
device: str,
model: str,
v_model: str,
config_path: str,
language_identification_library: str,
port: int = 7860,
share: bool = False,
debug: bool = False,
):
def from_dict(cls, dataset_path: str, data: Dict[str, any]):
def __init__(
self, models: List[Dict[str, any]], port: int = 5000, device: str = "cuda"
):
def from_dict(cls, data: Dict[str, any]):
def __init__(self, app_key: str, secret_key: str):
def from_dict(cls, data: Dict[str, any]):
def __init__(self, config_path: str):
# Path: text/japanese.py
def text2sep_kata(text: str) -> tuple:
parsed = pyopenjtalk.run_frontend(text)
res = []
sep = []
for parts in parsed:
word, yomi = replace_punctuation(parts["string"]), parts["pron"].replace(
"’", ""
)
if yomi:
if re.match(_MARKS, yomi):
if len(word) > 1:
word = [replace_punctuation(i) for i in list(word)]
yomi = word
res += yomi
sep += word
continue
elif word not in rep_map.keys() and word not in rep_map.values():
word = ","
yomi = word
res.append(yomi)
else:
if word in _SYMBOL_TOKENS:
res.append(word)
elif word in ("っ", "ッ"):
res.append("ッ")
elif word in _NO_YOMI_TOKENS:
pass
else:
res.append(word)
sep.append(word)
return sep, [hira2kata(i) for i in res], get_accent(parsed)
# Path: for_deploy/infer_utils.py
import sys
import torch
from transformers import (
AutoModelForMaskedLM,
AutoTokenizer,
DebertaV2Model,
DebertaV2Tokenizer,
ClapModel,
ClapProcessor,
)
from config import config
from text.japanese import text2sep_kata
class BertFeature:
def __init__(self, model_path, language="ZH"):
self.model_path = model_path
self.language = language
self.tokenizer = None
self.model = None
self.device = None
self._prepare()
def _get_device(self, device=config.bert_gen_config.device):
if (
sys.platform == "darwin"
and torch.backends.mps.is_available()
and device == "cpu"
):
device = "mps"
if not device:
device = "cuda"
return device
def _prepare(self):
self.device = self._get_device()
if self.language == "EN":
self.tokenizer = DebertaV2Tokenizer.from_pretrained(self.model_path)
self.model = DebertaV2Model.from_pretrained(self.model_path).to(self.device)
else:
self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)
self.model = AutoModelForMaskedLM.from_pretrained(self.model_path).to(
self.device
)
self.model.eval()
def get_bert_feature(self, text, word2ph):
if self.language == "JP":
| text = "".join(text2sep_kata(text)[0]) |
====REPOSITORY====
# Repo Name: chinhsuanwu/ifusion-threestudio
# Path: threestudio/models/materials/base.py
class BaseMaterial(BaseModule):
@dataclass
class Config(BaseModule.Config):
pass
cfg: Config
requires_normal: bool = False
requires_tangent: bool = False
def configure(self):
pass
def forward(self, *args, **kwargs) -> Float[Tensor, "*B 3"]:
raise NotImplementedError
def export(self, *args, **kwargs) -> Dict[str, Any]:
return {}
# Path: threestudio/models/networks.py
def get_encoding(n_input_dims: int, config) -> nn.Module:
# input suppose to be range [0, 1]
encoding: nn.Module
if config.otype == "ProgressiveBandFrequency":
encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))
elif config.otype == "ProgressiveBandHashGrid":
encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))
elif config.otype == "HashGridSpatialTime":
encoding = TCNNEncodingSpatialTime(n_input_dims, config) # 4D-fy encoding
else:
encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))
encoding = CompositeEncoding(
encoding,
include_xyz=config.get("include_xyz", False),
xyz_scale=2.0,
xyz_offset=-1.0,
) # FIXME: hard coded
return encoding
# Path: threestudio/models/networks.py
def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:
network: nn.Module
if config.otype == "VanillaMLP":
network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))
elif config.otype == "SphereInitVanillaMLP":
network = SphereInitVanillaMLP(
n_input_dims, n_output_dims, config_to_primitive(config)
)
else:
assert (
config.get("sphere_init", False) is False
), "sphere_init=True only supported by VanillaMLP"
network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))
return network
# Path: threestudio/utils/ops.py
def dot(x, y):
return torch.sum(x * y, -1, keepdim=True)
# Path: threestudio/utils/ops.py
def get_activation(name) -> Callable:
if name is None:
return lambda x: x
name = name.lower()
if name == "none":
return lambda x: x
elif name == "lin2srgb":
return lambda x: torch.where(
x > 0.0031308,
torch.pow(torch.clamp(x, min=0.0031308), 1.0 / 2.4) * 1.055 - 0.055,
12.92 * x,
).clamp(0.0, 1.0)
elif name == "exp":
return lambda x: torch.exp(x)
elif name == "shifted_exp":
return lambda x: torch.exp(x - 1.0)
elif name == "trunc_exp":
return trunc_exp
elif name == "shifted_trunc_exp":
return lambda x: trunc_exp(x - 1.0)
elif name == "sigmoid":
return lambda x: torch.sigmoid(x)
elif name == "tanh":
return lambda x: torch.tanh(x)
elif name == "shifted_softplus":
return lambda x: F.softplus(x - 1.0)
elif name == "scale_-11_01":
return lambda x: x * 0.5 + 0.5
else:
try:
return getattr(F, name)
except AttributeError:
raise ValueError(f"Unknown activation function: {name}")
# Path: threestudio/models/materials/no_material.py
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import dot, get_activation
from threestudio.utils.typing import *
@threestudio.register("no-material")
class NoMaterial(BaseMaterial):
@dataclass
class Config(BaseMaterial.Config):
n_output_dims: int = 3
color_activation: str = "sigmoid"
input_feature_dims: Optional[int] = None
mlp_network_config: Optional[dict] = None
requires_normal: bool = False
cfg: Config
def configure(self) -> None:
self.use_network = False
if (
self.cfg.input_feature_dims is not None
and self.cfg.mlp_network_config is not None
):
self.network = get_mlp(
self.cfg.input_feature_dims,
self.cfg.n_output_dims,
self.cfg.mlp_network_config,
)
self.use_network = True
self.requires_normal = self.cfg.requires_normal
def forward(
self, features: Float[Tensor, "B ... Nf"], **kwargs
) -> Float[Tensor, "B ... Nc"]:
if not self.use_network:
assert (
features.shape[-1] == self.cfg.n_output_dims
), f"Expected {self.cfg.n_output_dims} output dims, only got {features.shape[-1]} dims input."
| color = get_activation(self.cfg.color_activation)(features) |
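A minimal sketch of the activation-by-name pattern that get_activation implements; the dictionary here is a simplification of the if/elif chain above:

import torch

acts = {"none": lambda x: x, "sigmoid": torch.sigmoid, "tanh": torch.tanh}
features = torch.randn(2, 3)
color = acts["sigmoid"](features)  # squashed into (0, 1), as in the branch above
assert 0.0 <= float(color.min()) and float(color.max()) <= 1.0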
====REPOSITORY====
# Repo Name: jasursadikov/mud
# Path: utils.py
TEXT = {
'white': '\033[37m',
'gray': '\033[90m',
'black': '\033[30m',
'red': '\033[31m',
'green': '\033[32m',
'yellow': '\033[33m',
'blue': '\033[34m',
'magenta': '\033[35m',
'cyan': '\033[36m',
'bright_white': '\033[97m',
'bright_red': '\033[91m',
'bright_green': '\033[92m',
'bright_yellow': '\033[93m',
'bright_blue': '\033[94m',
'bright_magenta': '\033[95m',
'bright_cyan': '\033[96m',
}
# Path: utils.py
BACK = {
'white': '\033[47m',
'medium_gray': '\033[100m',
'black': '\033[40m',
'red': '\033[41m',
'green': '\033[42m',
'yellow': '\033[43m',
'blue': '\033[44m',
'magenta': '\033[45m',
'cyan': '\033[46m',
'bright_white': '\033[107m',
'bright_red': '\033[101m',
'bright_green': '\033[102m',
'bright_yellow': '\033[103m',
'bright_blue': '\033[104m',
'bright_magenta': '\033[105m',
'bright_cyan': '\033[106m',
}
# Path: utils.py
RESET = '\033[0m'
# Path: utils.py
STYLES = {
'bold': '\033[1m',
'dim': '\033[2m',
'italic': '\033[3m',
'underline': '\033[4m',
'blink': '\033[5m',
}
# Path: utils.py
END_STYLES = {
'bold': '\033[22m',
'dim': '\033[22m',
'italic': '\033[23m',
'underline': '\033[24m',
'blink': '\033[25m',
}
# Path: utils.py
def glyph(key: str) -> str:
return GLYPHS[key][0] if settings.mud_settings['nerd_fonts'] else GLYPHS[key][1]
# Path: commands.py
import utils
import asyncio
import subprocess
from utils import TEXT, BACK, RESET, STYLES, END_STYLES, glyph
from typing import List, Dict
from collections import Counter
from prettytable import PrettyTable, PLAIN_COLUMNS
class Commands:
def __init__(self, repos):
self.repos = repos
self.label_color_cache = {}
self.current_color_index = 0
# `mud status` command implementation
def status(self, repos: Dict[str, List[str]]) -> None:
table = self._get_table()
for path, tags in repos.items():
formatted_path = self._get_formatted_path(path)
branch = self._get_branch_status(path)
author = self._get_authors_name(path)
commit = self._get_commit_message(path, 30)
colored_labels = self._get_formatted_labels(tags)
# Sync with origin status
ahead_behind_cmd = subprocess.run(['git', 'rev-list', '--left-right', '--count', 'HEAD...@{upstream}'],
text=True, cwd=path, capture_output=True)
stdout = ahead_behind_cmd.stdout.strip().split()
if len(stdout) >= 2:
ahead, behind = stdout[0], stdout[1]
origin_sync = ''
if ahead and ahead != '0':
| origin_sync += f'{TEXT["bright_green"]}{glyph("ahead")} {ahead}{RESET}' |
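For clarity, a sketch of what that rev-list invocation yields on its own; it only runs inside a git checkout with an upstream configured:

import subprocess

out = subprocess.run(
    ["git", "rev-list", "--left-right", "--count", "HEAD...@{upstream}"],
    text=True, capture_output=True,
).stdout.strip().split()
# out == ['2', '1'] would mean 2 commits ahead of upstream and 1 behind.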
====REPOSITORY====
# Repo Name: Q-MM/PureMM
# Path: model/multimodal_encoder/builder.py
def build_vision_tower(vision_tower_cfg, **kwargs):
vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))
is_absolute_path_exists = os.path.exists(vision_tower)
if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion"):
return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)
raise ValueError(f'Unknown vision tower: {vision_tower}')
# Path: model/multimodal_projector/builder.py
def build_vision_projector(config, delay_load=False, **kwargs):
projector_type = getattr(config, 'mm_projector_type', 'linear')
if projector_type == 'linear':
return nn.Linear(config.mm_hidden_size, config.hidden_size)
mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type)
if mlp_gelu_match:
mlp_depth = int(mlp_gelu_match.group(1))
modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]
for _ in range(1, mlp_depth):
modules.append(nn.GELU())
modules.append(nn.Linear(config.hidden_size, config.hidden_size))
return nn.Sequential(*modules)
larger_mlp_gelu_match = re.match(r'^larger_mlp(\d+)x_gelu$', projector_type)
if larger_mlp_gelu_match:
mlp_depth = int(larger_mlp_gelu_match.group(1))
modules = [nn.Linear(config.mm_hidden_size, config.mm_hidden_size)]
for _ in range(1, mlp_depth-1):
modules.append(nn.GELU())
modules.append(nn.Linear(config.mm_hidden_size, config.mm_hidden_size))
modules.append(nn.Linear(config.mm_hidden_size, config.hidden_size))
return nn.Sequential(*modules)
if projector_type == 'identity':
return IdentityMap()
raise ValueError(f'Unknown projector type: {projector_type}')
# Path: model/PureMM_arch.py
from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
import torch
import torch.nn as nn
# Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
IGNORE_INDEX = -100
IMAGE_TOKEN_INDEX = -200
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"
def rank0_print(rank, *args):
if rank == 0:
print(*args)
class PureMMMetaModel:
def __init__(self, config):
super(PureMMMetaModel, self).__init__(config)
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=True)
# self.mm_projector = nn.Linear(config.mm_hidden_size, config.hidden_size)
| self.mm_projector = build_vision_projector(config) |
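A quick sketch of what build_vision_projector (shown above) returns for a non-default projector type; the config values here are invented:

from types import SimpleNamespace

cfg = SimpleNamespace(mm_projector_type="mlp2x_gelu", mm_hidden_size=1024, hidden_size=4096)
projector = build_vision_projector(cfg)
# -> nn.Sequential(Linear(1024, 4096), GELU(), Linear(4096, 4096))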
====REPOSITORY====
# Repo Name: Ananya2001-an/spotify-py-sdk
# Path: spotify_py_sdk/spotify_api.py
class SpotifyApi:
"""Create an api instance and call the various endpoint methods.
:param client_id: Client_ID for your app
:type client_id: str
:param client_secret: Client_Secret for your app
:type client_secret: str
:param config: pass :class:`SdkConfig` instance, defaults to None
:type config: :class:`SdkConfig`, optional
"""
_root_url: str = "https://api.spotify.com/v1/"
def __init__(self, client_id: str, client_secret: str, config: Optional[SdkConfig] = None):
"""Constructor method
"""
self.access_token_manager: AccessTokenManager = AccessTokenManager(client_id, client_secret)
self.sdk_config: Optional[SdkConfig] = config
self.albums: Albums = Albums(self)
self.artists: Artists = Artists(self)
self.audiobooks: Audiobooks = Audiobooks(self)
self.browse: Browse = Browse(self)
self.chapters: Chapters = Chapters(self)
self.episodes: Episodes = Episodes(self)
self.recommendations: Recommendations = Recommendations(self)
self.markets: Markets = Markets(self)
# self.player: Player = Player(self) # need different auth strategy; yet to be implemented
self.playlists: Playlists = Playlists(self)
self.shows: Shows = Shows(self)
self.tracks: Tracks = Tracks(self)
self.users: Users = Users(self)
self.search: Search = Search(self)
# self.current_user: CurrentUser = CurrentUser(self) # need different auth strategy; yet to be implemented
@classmethod
def fetch_results(cls, url: str, opts: dict):
"""Fetch results by making a request to the given URL
"""
try:
result = requests.request(method=opts["method"], url=url, headers=opts["headers"], data=opts["body"])
return result.json()
except HTTPError as e:
    raise RuntimeError(f"Failed to fetch result! {e}") from e
def make_request(self, method: Literal["GET", "POST", "PUT", "DELETE"], url: str, body: Optional[any] = None,
content_type: Optional[str] = None):
"""Get access token and make necessary request call to the api endpoint
"""
try:
access_token = self.access_token_manager.get_access_token()
except HTTPError as e:
    raise RuntimeError("Access Token not available! Authenticate again.") from e
full_url = SpotifyApi._root_url + url
opts = {
"method": method,
"headers": {
"Authorization": f"Bearer {access_token}",
"Content-Type": content_type if content_type else "application/json"
},
"body": json.dumps(body) if body and type(body) is not str else body
}
try:
if self.sdk_config:
if self.sdk_config.before_request:
self.sdk_config.before_request(full_url, opts)
if self.sdk_config.fetch:
result = self.sdk_config.fetch(full_url, opts)
else:
result = SpotifyApi.fetch_results(full_url, opts)
if self.sdk_config.after_request:
self.sdk_config.after_request(full_url, opts, result)
return result
return SpotifyApi.fetch_results(full_url, opts)
except (HTTPError, ValueError, InterruptedError) as e:
raise e
# handled = self.sdk_config.error_handler.handleErrors(e)
# if not handled:
# raise Exception("Failed to make request! Try again.")
# Path: spotify_py_sdk/endpoints/recommendations.py
class RecommendationsRequestRequiredArguments:
def __init__(self, seed_artists: Optional[list[str]] = None, seed_genres: Optional[list[str]] = None, seed_tracks: Optional[list[str]] = None):
self.seed_artists = seed_artists
self.seed_genres = seed_genres
self.seed_tracks = seed_tracks
# Path: tests/endpoints/test_recommendations.py
import json
import pytest
import os
from spotify_py_sdk import SpotifyApi
from spotify_py_sdk.endpoints.recommendations import RecommendationsRequestRequiredArguments
from dotenv import load_dotenv
load_dotenv()
@pytest.fixture
def api():
| return SpotifyApi(os.getenv("CLIENT_ID"), os.getenv("CLIENT_SECRET")) |
====REPOSITORY====
# Repo Name: kyleliang919/Optimizer-Zoo
# Path: optimizer_zoo/Trainer/async_trainer.py
class AsyncTrainer(Trainer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.accelerator.sync_gradients = None
def training_step(self, model, inputs):
# make sure the gradient is not automatically synced
with model.no_sync():
model.train()
inputs = self._prepare_inputs(inputs)
if is_sagemaker_mp_enabled():
loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)
return loss_mb.reduce_mean().detach().to(self.args.device)
with self.compute_loss_context_manager():
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
self.accelerator.backward(loss)
return loss.detach() / self.args.gradient_accumulation_steps
# Path: optimizer_zoo/Trainer/async_trainer.py
class AsyncSFTTrainer(SFTTrainer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def training_step(self, model, inputs):
# make sure the gradient is not automatically synced
with model.no_sync():
model.train()
inputs = self._prepare_inputs(inputs)
if is_sagemaker_mp_enabled():
loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)
return loss_mb.reduce_mean().detach().to(self.args.device)
with self.compute_loss_context_manager():
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
self.accelerator.backward(loss)
return loss.detach() / self.args.gradient_accumulation_steps
# Path: optimizer_zoo/Trainer/async_trainer.py
class AsyncDPOTrainer(DPOTrainer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def training_step(self, model, inputs):
# make sure the gradient is not automatically synced
with model.no_sync():
model.train()
inputs = self._prepare_inputs(inputs)
if is_sagemaker_mp_enabled():
loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)
return loss_mb.reduce_mean().detach().to(self.args.device)
with self.compute_loss_context_manager():
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
self.accelerator.backward(loss)
return loss.detach() / self.args.gradient_accumulation_steps
# Path: optimizer_zoo/Trainer/async_trainer.py
class AsyncSeq2SeqTrainer(Seq2SeqTrainer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.accelerator.sync_gradients = None
def training_step(self, model, inputs):
# make sure the gradient is not automatically synced
with model.no_sync():
model.train()
inputs = self._prepare_inputs(inputs)
if is_sagemaker_mp_enabled():
loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)
return loss_mb.reduce_mean().detach().to(self.args.device)
with self.compute_loss_context_manager():
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
self.accelerator.backward(loss)
return loss.detach() / self.args.gradient_accumulation_steps
# Path: optimizer_zoo/Trainer/utils.py
from transformers import Trainer, Seq2SeqTrainer
from trl import SFTTrainer, DPOTrainer
from .async_trainer import AsyncTrainer, AsyncSFTTrainer, AsyncDPOTrainer, AsyncSeq2SeqTrainer
def create_trainer(training_args):
if training_args.task == "pretraining":
return AsyncTrainer if training_args.async_grad else Trainer
elif training_args.task == "sft":
return AsyncSFTTrainer if training_args.async_grad else SFTTrainer
elif training_args.task == "dpo":
return AsyncDPOTrainer if training_args.async_grad else DPOTrainer
elif training_args.task == "seq2seq":
| return AsyncSeq2SeqTrainer if training_args.async_grad else Seq2SeqTrainer |
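Selecting a trainer is then a one-liner; a sketch with invented args:

from types import SimpleNamespace

args = SimpleNamespace(task="sft", async_grad=True)
TrainerCls = create_trainer(args)  # -> AsyncSFTTrainer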
====REPOSITORY====
# Repo Name: giaminhgist/3D-DAM
# Path: lib/model/attention_block.py
class SpatialAttention3D(nn.Module):
def __init__(self, out_channel=64, kernel_size=3, stride=1, padding=1):
super(SpatialAttention3D, self).__init__()
self.conv = nn.Conv3d(2, out_channel,
kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
residual = x
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv(x)
x = self.sigmoid(x)
out = x * residual
return out
# Path: lib/model/attention_block.py
class ChannelAttention3D(nn.Module):
def __init__(self, in_planes=64, ratio=8):
super(ChannelAttention3D, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool3d(1)
self.max_pool = nn.AdaptiveMaxPool3d(1)
self.fc = nn.Sequential(nn.Conv3d(in_planes, in_planes // ratio, 1, bias=False),
nn.ReLU(),
nn.Conv3d(in_planes // ratio, in_planes, 1, bias=False))
self.sigmoid = nn.Sigmoid()
def forward(self, x):
residual = x
avg_out = self.fc(self.avg_pool(x))
max_out = self.fc(self.max_pool(x))
out = avg_out + max_out
return self.sigmoid(out) * residual
# Path: lib/model/attention_block.py
class residual_block(nn.Module):
def __init__(self, channel_size=64):
super(residual_block, self).__init__()
self.conv = nn.Conv3d(channel_size, channel_size, kernel_size=3, padding=1)
self.relu = nn.ReLU()
self.bn = nn.BatchNorm3d(channel_size)
def forward(self, x):
residual = x
y = self.conv(x)
y = self.bn(y)
y = self.relu(y)
out = y + residual
return out
# Path: lib/model/DuoAttention.py
import numpy as np
import torch
from torch import nn
from lib.model.attention_block import SpatialAttention3D, ChannelAttention3D, residual_block
class DAM(nn.Module):
def __init__(self, channels=64):
super(DAM, self).__init__()
self.sa = SpatialAttention3D(out_channel=channels)
self.ca = ChannelAttention3D(in_planes=channels)
def forward(self, x):
residual = x
out = self.ca(x)
out = self.sa(out)
out = out + residual
return out
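A shape-preservation sketch for DAM; the sizes are arbitrary, but the channel count must match the `channels` argument:

import torch

dam = DAM(channels=16)
x = torch.randn(1, 16, 8, 8, 8)  # (B, C, D, H, W)
assert dam(x).shape == x.shape   # attention rescales values, not shapes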
class Duo_Attention(nn.Module):
def __init__(
self, input_size=(1, 169, 208, 179), num_classes=3, dropout=0
):
super().__init__()
self.conv = nn.Sequential(
nn.Conv3d(input_size[0], 8, 3, padding=1),
nn.BatchNorm3d(8),
nn.ReLU(),
# nn.MaxPool3d(2, 2),
nn.Conv3d(8, 16, 3, padding=1, stride=2),
nn.BatchNorm3d(16),
nn.ReLU(),
| residual_block(channel_size=16), |
====REPOSITORY====
# Repo Name: itsluminous/EasyEncryption
# Path: core.py
def generate_key():
"""Generate a Fernet key."""
return Fernet.generate_key()
# Path: core.py
def encrypt_message(message, key):
"""Encrypt a message using the provided key."""
fernet = Fernet(key)
encrypted = fernet.encrypt(message.encode())
return encrypted
# Path: core.py
def decrypt_message(encrypted_message, key):
"""Decrypt an encrypted message using the provided key."""
fernet = Fernet(key)
decrypted = fernet.decrypt(encrypted_message).decode()
return decrypted
# Path: core.py
def encrypt_file(file_path, key):
"""Encrypt a file using the provided key."""
try:
with open(file_path, 'r', encoding='utf-8') as file:
content = file.read()
encrypted_content = encrypt_message(content, key)
with open(file_path + '.enc', 'wb') as encrypted_file:
encrypted_file.write(encrypted_content)
print(f"\nFile '{file_path}' encrypted successfully.")
except FileNotFoundError:
print("\nFile not found.")
# Path: core.py
def decrypt_file(file_path, key):
"""Decrypt an encrypted file using the provided key."""
try:
with open(file_path, 'rb') as file:
encrypted_content = file.read()
decrypted_content = decrypt_message(encrypted_content, key)
decrypted_file_path = file_path[:-4] # Remove the '.enc' extension
with open(decrypted_file_path, 'w', encoding='utf-8') as decrypted_file:
decrypted_file.write(decrypted_content)
print(f"\nFile '{file_path}' decrypted successfully.")
except FileNotFoundError:
print("\nFile not found.")
except ValueError:
print("\nInvalid decryption key or file content.")
# Path: script.py
from core import generate_key, encrypt_message, decrypt_message, encrypt_file, decrypt_file
"""
Script providing a user interface for encryption and decryption operations.
"""
def generate_new_key():
"""
Generate a new encryption key.
Returns:
- bytes: New encryption key.
"""
key = generate_key()
print(f"\nGenerated Key: {key.decode()}")
return key
def enter_user_key():
"""
Prompt user to enter a key.
Returns:
- bytes: User-entered key.
"""
print("\nEnter the key:")
return input().encode()
def encrypt_user_message(key):
"""
Encrypt a user-entered message.
Parameters:
- key (bytes): Encryption key.
"""
if key is None:
print("\nPlease generate or enter a key first.")
else:
print("\nEnter a message to encrypt (press Enter twice to finish):")
lines = []
while True:
line = input()
if not line:
break
lines.append(line)
user_input = '\n'.join(lines)
encrypted_message = encrypt_message(user_input, key)
print(f"\nEncrypted message: {encrypted_message}")
def decrypt_user_message(key):
"""
Decrypt a user-entered message.
Parameters:
- key (bytes): Decryption key.
"""
if key is None:
print("\nPlease generate or enter a key first.")
else:
print("\nEnter the encrypted message (press Enter twice to finish):")
lines = []
while True:
line = input()
if not line:
break
lines.append(line)
encrypted_input = '\n'.join(lines)
| decrypted_message = decrypt_message(encrypted_input.encode(), key) |
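For context, the Fernet round-trip these helpers wrap, as a self-contained sketch:

from cryptography.fernet import Fernet

key = Fernet.generate_key()
token = Fernet(key).encrypt(b"hello")          # token is URL-safe base64 bytes
assert Fernet(key).decrypt(token) == b"hello"  # same key recovers the plaintext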
====REPOSITORY====
# Repo Name: gardenifi/server
# Path: app/main_app.py
INVALID_DATA = "Invalid data: Unable to process the provided data"
class GlobalVars:
class WifiData(BaseModel):
class ValveData(BaseModel):
class BleData(BaseModel):
def __init__(self):
def refresh_set(self):
def refresh_set(self, value):
async def index():
async def resource_not_found(request: Request, exc: HTTPException):
async def read_ble_data(page: int = None):
async def write_ble_data(data: BleData):
async def discover_wifi(chunked: int = None, page: int = None):
async def save_wifi(data: WifiData):
async def turn(data: ValveData):
async def check_mqtt():
def web_server():
def setup_gpio():
def parse_arguments():
def main():
# Path: app/main_app.py
@app.exception_handler(404)
async def resource_not_found(request: Request, exc: HTTPException):
"""Not found error."""
logger.error(f"Request: {request}")
return JSONResponse(status_code=404, content={"detail": str(exc.detail)})
# Path: tests/api/resource_not_found_test.py
import json
import pytest
from fastapi.testclient import TestClient
from fastapi import HTTPException, Request
from fastapi.responses import JSONResponse
from app.main_app import app
from app.main_app import resource_not_found
"""MIT License
Copyright (c) 2023, Marios Karagiannopoulos
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
**Attribution Requirement:**
When using or distributing the software, an attribution to Marios Karagiannopoulos must be included.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
client = TestClient(app)
scope = {"type": "http", "http_version": "1.1", "method": "GET", "path": "/"}
@pytest.fixture(scope="function")
async def request_obj():
"""Request object creation fixture"""
return Request(scope)
class TestResourceNotFound:
"""
Test class for the 'resource_not_found' error handler function.
"""
@pytest.mark.asyncio
async def test_returns_json_response_with_status_code_404_and_detail_of_httpexception(self, obj=request_obj):
"""
Test for returning a JSONResponse object with status code 404 and the detail of the HTTPException passed as an argument.
"""
exc = HTTPException(status_code=404, detail="Not found")
| response = await resource_not_found(obj, exc) |
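The checks that would naturally follow are a reasonable guess, not the original test body:

# Hypothetical follow-up assertions:
assert response.status_code == 404
assert json.loads(response.body) == {"detail": "Not found"}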
====REPOSITORY====
# Repo Name: xiaoye0x0/pfgo_tg_bot
# Path: utils/task/model.py
class Task(metaclass=SingletonMeta):
def __init__(self, args) -> None:
self.conf_file = args.config
self.bot_token: str = ""
self.pfgo_url: str = ""
self.username: str = ""
self.password: str = ""
self.hide: list = []
self.webhook_url = ""
self.webhook_port = ""
self.running_host = ""
self.running_port = 0
self._init_conf()
def _init_conf(self):
config = configparser.ConfigParser()
config.read(self.conf_file)
self.bot_token = config.get("bot", "token")
self.pfgo_url = config.get("pfgo", "url")
self.username = config.get("pfgo", "username")
self.password = config.get("pfgo", "password")
self.hide += config.get("pfgo", "hide").split(",")
self.webhook_url = config.get("webhook", "webhook_url")
self.webhook_port = config.get("webhook", "webhook_port")
self.running_host = config.get("webhook", "running_host")
self.running_port = int(config.get("webhook", "running_port"))
# Path: utils/log.py
class Logmanager(metaclass=SingletonMeta):
log_list = []
log_list_lock = threading.Lock()
path = "./"
def __init__(self, path: str) -> None:
Logmanager.path = path
@classmethod
def create_logger(cls, name=None):
if name is None:
name = "default"
logger = logging.getLogger(name)
if name not in cls.log_list:
with Logmanager.log_list_lock:
if name not in cls.log_list:
cls.log_list.append(name)
logger.setLevel(logging.INFO)
logfile = f"{Logmanager.path}/log.log"
fh = RotatingFileHandler(
logfile,
mode="a",
maxBytes=1024 * 1024 * 10,
backupCount=2,
encoding="utf-8",
)
formatter = logging.Formatter(
"[%(name)s] [%(asctime)s] [%(levelname)s] %(message)s",
"%Y%m%d-%H:%M:%S",
)
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
fh.close()
ch.close()
return logger
# Path: utils/task/set_args.py
import os
import argparse
from .model import Task
from ..log import Logmanager
def is_file_exists(file_path) -> bool:
r = os.path.exists(file_path)
if not r:
    LOGGER.error(f"File {file_path} does not exist")
return r
def create_folder_if_not_exists(folder_path):
if not folder_path:
return
if not os.path.exists(folder_path):
os.makedirs(folder_path)
def parse_command_line_args():
"""
-c --config: config file
--log: where log files are stored
"""
parser = argparse.ArgumentParser(description="Runtime arguments")
parser.add_argument("--config", "-c", type=str, default="./config.ini", help="config file")
parser.add_argument("--log", type=str, default="./", help="folder for log files; defaults to the current path")
args = parser.parse_args()
# Initialize the logging module
global LOGGER
create_folder_if_not_exists(args.log)
| Logmanager(args.log) |
====REPOSITORY====
# Repo Name: shibing624/chatgpt-webui
# Path: src/config.py
def retrieve_openai_api(api_key=None):
def retrieve_proxy(proxy=None):
def update_doc_config(two_column_pdf):
# Path: src/presets.py
OPENAI_API_BASE = "https://api.openai.com/v1"
# Path: src/utils.py
def excel_to_string(file_path):
# Read all worksheets from the Excel file
excel_file = pd.read_excel(file_path, engine="openpyxl", sheet_name=None)
# Initialize the result list
result = []
# Iterate over each worksheet
for sheet_name, sheet_data in excel_file.items():
# Process the current worksheet and append it to the result
result += sheet_to_string(sheet_data, sheet_name=sheet_name)
return result
# Path: src/utils.py
def get_files_hash(file_src=None, file_paths=None):
if file_src:
file_paths = [x.name for x in file_src]
file_paths.sort(key=lambda x: os.path.basename(x))
md5_hash = hashlib.md5()
for file_path in file_paths:
with open(file_path, "rb") as f:
while chunk := f.read(8192):
md5_hash.update(chunk)
return md5_hash.hexdigest()
# Path: src/utils.py
def load_pkl(file_path):
with open(file_path, 'rb') as f:
data = pickle.load(f)
return data
# Path: src/utils.py
def save_pkl(data, file_path):
with open(file_path, 'wb') as f:
pickle.dump(data, f)
# Path: src/index_func.py
import os
import re
import PyPDF2
from typing import List, Optional, Any
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from loguru import logger
from tqdm import tqdm
from src.config import local_embedding, retrieve_proxy, chunk_overlap, chunk_size, hf_emb_model_name
from src.presets import OPENAI_API_BASE
from src.utils import excel_to_string, get_files_hash, load_pkl, save_pkl
from src.pdf_func import parse_pdf
from src.config import advance_docs
from langchain.document_loaders import UnstructuredWordDocumentLoader
from langchain.document_loaders import UnstructuredPowerPointLoader
from langchain.document_loaders import UnstructuredEPubLoader
from langchain.document_loaders import TextLoader
from langchain.vectorstores import FAISS
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.embeddings import OpenAIEmbeddings
pwd_path = os.path.abspath(os.path.dirname(__file__))
class ChineseRecursiveTextSplitter(RecursiveCharacterTextSplitter):
"""Recursive text splitter for Chinese text.
copy from: https://github.com/chatchat-space/Langchain-Chatchat/tree/master
"""
def __init__(
self,
separators: Optional[List[str]] = None,
keep_separator: bool = True,
is_separator_regex: bool = True,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(keep_separator=keep_separator, **kwargs)
self._separators = separators or [
    "\n\n",
    "\n",
    "。|!|?",
    r"\.\s|\!\s|\?\s",
    r";|;\s",
    r",|,\s"
]
self._is_separator_regex = is_separator_regex
@staticmethod
def _split_text_with_regex_from_end(
text: str, separator: str, keep_separator: bool
) -> List[str]:
# Now that we have the separator, split the text
if separator:
if keep_separator:
# The parentheses in the pattern keep the delimiters in the result.
_splits = re.split(f"({separator})", text)
splits = ["".join(i) for i in zip(_splits[0::2], _splits[1::2])]
if len(_splits) % 2 == 1:
splits += _splits[-1:]
else:
splits = re.split(separator, text)
else:
splits = list(text)
return [s for s in splits if s != ""]
def _split_text(self, text: str, separators: List[str]) -> List[str]:
"""Split incoming text and return chunks."""
final_chunks = []
# Get appropriate separator to use
separator = separators[-1]
new_separators = []
for i, _s in enumerate(separators):
_separator = _s if self._is_separator_regex else re.escape(_s)
if _s == "":
separator = _s
break
if re.search(_separator, text):
separator = _s
new_separators = separators[i + 1:]
break
_separator = separator if self._is_separator_regex else re.escape(separator)
splits = self._split_text_with_regex_from_end(text, _separator, self._keep_separator)
# Now go merging things, recursively splitting longer texts.
_good_splits = []
_separator = "" if self._keep_separator else separator
for s in splits:
if self._length_function(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
_good_splits = []
if not new_separators:
final_chunks.append(s)
else:
other_info = self._split_text(s, new_separators)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
return [re.sub(r"\n{2,}", "\n", chunk.strip()) for chunk in final_chunks if chunk.strip() != ""]
def get_documents(file_paths):
| text_splitter = ChineseRecursiveTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap) |
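A hedged sketch of the splitter in isolation, using the class defined above; the sample text and sizes are invented:

splitter = ChineseRecursiveTextSplitter(chunk_size=10, chunk_overlap=0)
chunks = splitter.split_text("第一句话。第二句话!第三句话?")
# With a small chunk_size, splits fall on 。/!/? boundaries first.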
====REPOSITORY====
# Repo Name: ConnectAI-E/GitMaya
# Path: server/tasks/lark/base.py
def get_bot_by_application_id(app_id):
application = (
db.session.query(IMApplication)
.filter(
or_(
IMApplication.app_id == app_id,
IMApplication.id == app_id,
)
)
.first()
)
if application:
return (
Bot(
app_id=application.app_id,
app_secret=application.app_secret,
),
application,
)
return None, None
# Path: server/tasks/lark/base.py
def get_git_object_by_message_id(message_id):
"""
Distinguish between Repo, Issue, and PullRequest objects by message_id.
Args:
    message_id: the message ID
Returns:
    repo: the Repo object, if one exists
    issue: the Issue object, if one exists
    pr: the PullRequest object, if one exists
"""
issue = (
db.session.query(Issue)
.filter(
Issue.message_id == message_id,
)
.first()
)
if issue:
return None, issue, None
pr = (
db.session.query(PullRequest)
.filter(
PullRequest.message_id == message_id,
)
.first()
)
if pr:
return None, None, pr
repo = (
db.session.query(Repo)
.filter(
Repo.message_id == message_id,
)
.first()
)
if repo:
return repo, None, None
return None, None, None
# Path: server/tasks/lark/base.py
def with_authenticated_github():
def decorate(func):
@wraps(func)
def wrapper(*args, **kwargs):
"""
1. This decorator centralizes error-message handling.
2. GitHub REST API calls raise an exception when they fail.
3. The decorator catches specific exceptions and sends the operator a targeted error message.
"""
try:
return func(*args, **kwargs)
except GitHubPermissionError as e:
try:
from .manage import send_manage_fail_message
app_id, message_id, content, raw_message = args[-4:]
host = os.environ.get("DOMAIN")
send_manage_fail_message(
    f"[Please link your GitHub account and retry]({host}/api/github/oauth)",
app_id,
message_id,
content,
raw_message,
)
except Exception as e:
logging.error(e)
except Exception as e:
raise e
return wrapper
return decorate
# Path: server/tasks/lark/pull_request.py
import json
import logging
from celery_app import app, celery
from connectai.lark.sdk import FeishuTextMessage
from model.schema import (
ChatGroup,
CodeApplication,
CodeUser,
IMUser,
PullRequest,
Repo,
Team,
TeamMember,
db,
)
from model.team import get_assignees_by_openid
from utils.github.repo import GitHubAppRepo
from utils.lark.pr_card import PullCard
from utils.lark.pr_manual import (
PrManual,
PullRequestDiff,
PullRequestLog,
PullRequestView,
)
from utils.lark.pr_tip_failed import PrTipFailed
from utils.lark.pr_tip_success import PrTipSuccess
from .base import (
get_bot_by_application_id,
get_git_object_by_message_id,
with_authenticated_github,
)
@celery.task()
def send_pull_request_failed_tip(
content, app_id, message_id, *args, bot=None, **kwargs
):
"""send new card message to user.
Args:
app_id: IMApplication.app_id.
message_id: lark message id.
content: error message
"""
if not bot:
| bot, _ = get_bot_by_application_id(app_id) |
====REPOSITORY====
# Repo Name: camenduru/AnyDoor-online-hf
# Path: dinov2/dinov2/layers/attention.py
class Attention(nn.Module):
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = False,
proj_bias: bool = True,
attn_drop: float = 0.0,
proj_drop: float = 0.0,
) -> None:
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim**-0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim, bias=proj_bias)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x: Tensor) -> Tensor:
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
# Path: dinov2/dinov2/layers/attention.py
class MemEffAttention(Attention):
def forward(self, x: Tensor, attn_bias=None) -> Tensor:
if not XFORMERS_AVAILABLE:
assert attn_bias is None, "xFormers is required for nested tensors usage"
return super().forward(x)
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
q, k, v = unbind(qkv, 2)
if attn_bias is not None:
self_att_op = fmha.MemoryEfficientAttentionFlashAttentionOp
else:
self_att_op = None
x = memory_efficient_attention(q, k, v, attn_bias=attn_bias, op=self_att_op)
x = x.reshape([B, N, C])
x = self.proj(x)
x = self.proj_drop(x)
return x
# Path: dinov2/dinov2/layers/drop_path.py
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
# Path: dinov2/dinov2/layers/layer_scale.py
class LayerScale(nn.Module):
def __init__(
self,
dim: int,
init_values: Union[float, Tensor] = 1e-5,
inplace: bool = False,
) -> None:
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x: Tensor) -> Tensor:
return x.mul_(self.gamma) if self.inplace else x * self.gamma
# Path: dinov2/dinov2/layers/mlp.py
class Mlp(nn.Module):
def __init__(
self,
in_features: int,
hidden_features: Optional[int] = None,
out_features: Optional[int] = None,
act_layer: Callable[..., nn.Module] = nn.GELU,
drop: float = 0.0,
bias: bool = True,
) -> None:
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features, bias=bias)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features, bias=bias)
self.drop = nn.Dropout(drop)
def forward(self, x: Tensor) -> Tensor:
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
# Path: dinov2/dinov2/layers/block.py
import logging
import torch
from typing import Callable, List, Any, Tuple, Dict
from torch import nn, Tensor
from .attention import Attention, MemEffAttention
from .drop_path import DropPath
from .layer_scale import LayerScale
from .mlp import Mlp
from xformers.ops import fmha
from xformers.ops import scaled_index_add, index_select_cat
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# References:
# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py
logger = logging.getLogger("dinov2")
try:
XFORMERS_AVAILABLE = True
except ImportError:
logger.warning("xFormers not available")
XFORMERS_AVAILABLE = False
class Block(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
mlp_ratio: float = 4.0,
qkv_bias: bool = False,
proj_bias: bool = True,
ffn_bias: bool = True,
drop: float = 0.0,
attn_drop: float = 0.0,
init_values=None,
drop_path: float = 0.0,
act_layer: Callable[..., nn.Module] = nn.GELU,
norm_layer: Callable[..., nn.Module] = nn.LayerNorm,
attn_class: Callable[..., nn.Module] = Attention,
| ffn_layer: Callable[..., nn.Module] = Mlp, |
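As a sanity check on the shape bookkeeping in `Attention.forward` above, the same computation can be reproduced with plain torch ops on toy sizes (a sketch assuming torch is installed; it mirrors, not replaces, the DINOv2 module):

import torch

B, N, C, H = 2, 5, 64, 8                              # batch, tokens, dim, heads
x = torch.randn(B, N, C)
qkv = torch.nn.Linear(C, 3 * C)(x)                    # (B, N, 3C)
qkv = qkv.reshape(B, N, 3, H, C // H).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0] * (C // H) ** -0.5, qkv[1], qkv[2]   # scale q by head_dim**-0.5
attn = (q @ k.transpose(-2, -1)).softmax(dim=-1)      # (B, H, N, N)
out = (attn @ v).transpose(1, 2).reshape(B, N, C)
assert out.shape == (B, N, C)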
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: OmchainFoundation/evm-indexer
# Path: evm_indexer/fetcher.py
class Fetcher:
def __init__(self, node_endpoint, is_poa=True):
self.web3 = Web3(Web3.HTTPProvider(node_endpoint))
if is_poa:
self.web3.middleware_onion.inject(geth_poa_middleware, layer=0)
if not self.web3.is_connected():
raise ConnectionError('Could not connect to node at {}'.format(node_endpoint))
def fetch_block(self, block_number):
try:
return self.web3.eth.get_block(block_number, full_transactions=True)
except Exception as e:
return None
def fetch_latest_block_number(self):
return self.web3.eth.block_number
def fetch_blocks_in_range(self, start_block, end_block):
blocks = []
for block_number in range(start_block, end_block + 1):
block = self.fetch_block(block_number)
if block:
blocks.append(block)
return blocks
def fetch_transactions_in_block(self, block_number):
block = self.fetch_block(block_number)
if block:
return block['transactions']
else:
return None
def fetch_transactions_in_range(self, start_block, end_block):
transactions = []
for block_number in range(start_block, end_block + 1):
print('Fetching block {}'.format(block_number))
block_transactions = self.fetch_transactions_in_block(block_number)
if block_transactions:
transactions.extend(block_transactions)
return transactions
# Path: evm_indexer/decoder.py
class Decoder:
def __init__(self, fetcher):
self.fetcher = fetcher
self.web3 = fetcher.web3
def get_erc20_transfers_from_tx(self, tx_receipt):
# Filter the logs for ERC20 Transfer events
transfer_events = []
for log in tx_receipt['logs']:
if log['topics'][0] == ERC20_TRANSFER_EVENT_SIGNATURE_HASH and len(log['topics']) == 3:
try:
from_address = self.web3.to_checksum_address('0x' + log['topics'][1][-40:])
to_address = self.web3.to_checksum_address('0x' + log['topics'][2][-40:])
token_address = log['address']
amount = Web3.to_int(hexstr=log['data'])
transfer_events.append({
'from': from_address,
'to': to_address,
'amount': amount,
'token_address': token_address
})
except BadFunctionCallOutput:
# Handle error if the log decoding fails
continue
return transfer_events
def get_native_transfers_from_tx(self, tx_hash):
tx = self.web3.eth.get_transaction(tx_hash)
value = tx['value']
if value == 0:
return []
from_address = self.web3.to_checksum_address(tx['from'])
to_address = self.web3.to_checksum_address(tx['to'])
return [{
'from': from_address,
'to': to_address,
'amount': value,
'token_address': None
}]
# Path: evm_indexer/internal_tracer.py
class InternalTracer:
def __init__(self, node_endpoint):
self.node_endpoint = node_endpoint
def get_tx_receipt(self, tx_hash):
try:
if type(tx_hash) != str:
tx_hash = Web3.to_hex(tx_hash)
headers = {'Content-Type': 'application/json'}
payload = {
"jsonrpc": "2.0",
"id": 1,
"method": "eth_getTransactionReceipt",
"params": [tx_hash]
}
response = requests.post(self.node_endpoint, headers=headers, data=json.dumps(payload))
if response.status_code == 200:
return response.json()
else:
return None
except Exception as e:
return None
def get_trace(self, tx_hash):
try:
headers = {'Content-Type': 'application/json'}
payload = {
"jsonrpc": "2.0",
"id": 1,
"method": "debug_traceTransaction",
"params": [
tx_hash,
]
}
response = requests.post(self.node_endpoint, headers=headers, data=json.dumps(payload))
if response.status_code == 200:
return response.json()
else:
return None
except Exception as e:
return None
def capture_internal_calls(self, trace_response, tx_receipt):
captured_calls = []
struct_logs = trace_response['result']['structLogs']
# Initial call from EOA to the contract
initiator_address = tx_receipt['from']
contract_address = tx_receipt['to'] # Contract being called
current_call = {'from': initiator_address, 'to': contract_address}
for log in struct_logs:
op = log['op']
stack = log['stack']
if op in ['CALL', 'CALLCODE', 'DELEGATECALL', 'STATICCALL']:
if len(stack) >= 7:
# Extract 'to' address and value from the stack
to_address = '0x' + stack[-2][-40:]
value = int(stack[-3], 16) if op == 'CALL' else 0 # Value is relevant only for CALL
captured_call = {'op': op, 'from': current_call['to'], 'to': to_address, 'value': value}
captured_calls.append(captured_call)
# Update the current call context
current_call['from'] = current_call['to']
current_call['to'] = to_address
return captured_calls
def calculate_net_changes(captured_calls):
net_changes = {}
for call in captured_calls:
if call['from'] not in net_changes:
net_changes[call['from']] = 0
if call['to'] not in net_changes:
net_changes[call['to']] = 0
net_changes[call['from']] -= call['value']
net_changes[call['to']] += call['value']
return net_changes
# Path: tests/test_range.py
import sys
import os
from evm_indexer.fetcher import Fetcher
from evm_indexer.decoder import Decoder
from evm_indexer.internal_tracer import InternalTracer
from web3 import Web3
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
NODE_URL = 'https://seed.omchain.io'
fetcher = Fetcher(NODE_URL, is_poa=True)
| decoder = Decoder(fetcher=fetcher) |
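The `ERC20_TRANSFER_EVENT_SIGNATURE_HASH` constant that `Decoder` compares against is, by ERC-20 convention, the keccak-256 of the canonical event signature. A small sketch of how that topic0 value is derived (assuming web3.py is installed):

from web3 import Web3

# topic0 of every ERC20 Transfer log is keccak-256 of the event signature
sig_hash = Web3.keccak(text="Transfer(address,address,uint256)").hex()
print(sig_hash)  # the well-known 0xddf252ad...523b3ef topic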
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: omkarcloud/google-scraper
# Path: src/write_output.py
def write_output(query, data, entity_type, transformer=kebab_case):
query_kebab = transformer(query)
make_folders(query_kebab)
csv_path = f"output/{query_kebab}/csv/"
json_path = f"output/{query_kebab}/json/"
create(data, [], csv_path, json_path, query_kebab, entity_type)
# Path: src/search.py
FAILED_DUE_TO_CREDITS_EXHAUSTED = "FAILED_DUE_TO_CREDITS_EXHAUSTED"
# Path: src/search.py
FAILED_DUE_TO_NO_KEY = "FAILED_DUE_TO_NO_KEY"
# Path: src/search.py
FAILED_DUE_TO_NOT_SUBSCRIBED = "FAILED_DUE_TO_NOT_SUBSCRIBED"
# Path: src/search.py
FAILED_DUE_TO_UNKNOWN_ERROR = "FAILED_DUE_TO_UNKNOWN_ERROR"
# Path: src/search.py
@request(**default_request_options)
def search(_, data, metadata):
if not metadata.get('key'):
return DontCache({
"data": None,
"error":FAILED_DUE_TO_NO_KEY
})
max_items = data['max']
url = "https://google-scraper.p.rapidapi.com/search/"
qp = {"query": data['query']}
params = {**qp, 'link':cl.join_link(url, query_params=qp)}
request_data = {**metadata, "params": params}
result = do_request(request_data)
initial_results = cl.select(result, 'data', 'results', default=[])
if not cl.select(result, 'error'):
more_results = cl.select(result, 'data', 'results', default=[])
print(f"Got {len(more_results)} more results")
while cl.select(result, 'data', 'next') and (max_items is None or len(initial_results) < max_items):
next = cl.select(result, 'data', 'next')
params = {**qp, 'link':next}
request_data = {**metadata, "params": params}
result = do_request(request_data)
if result.get('error'):
break
more_results = cl.select(result, 'data', 'results', default=[])
print(f"Got {len(more_results)} more results")
initial_results.extend(more_results)
if cl.select(result, 'error'):
return DontCache(result)
else:
if max_items is not None:
initial_results = initial_results[:max_items]
result['data']['results'] = initial_results
return result
# Path: src/google_scraper.py
from typing import List, Optional, Union, Dict
from botasaurus import bt
from .write_output import write_output
from .search import FAILED_DUE_TO_CREDITS_EXHAUSTED, FAILED_DUE_TO_NO_KEY,FAILED_DUE_TO_NOT_SUBSCRIBED, FAILED_DUE_TO_UNKNOWN_ERROR, search
def clean_data(social_details):
success, credits_exhausted, not_subscribed, unknown_error, no_key = [], [], [], [], []
for detail in social_details:
if detail.get("error") is None:
success.append(detail)
elif detail["error"] == FAILED_DUE_TO_CREDITS_EXHAUSTED:
credits_exhausted.append(detail)
elif detail["error"] == FAILED_DUE_TO_NOT_SUBSCRIBED:
not_subscribed.append(detail)
elif detail["error"] == FAILED_DUE_TO_UNKNOWN_ERROR:
unknown_error.append(detail)
elif detail["error"] == FAILED_DUE_TO_NO_KEY:
no_key.append(detail)
return success, credits_exhausted, not_subscribed, unknown_error, no_key
def print_data_errors(credits_exhausted, not_subscribed, unknown_error, no_key):
if credits_exhausted:
name = "queries" if len(credits_exhausted) > 1 else "query"
print(f"Could not get data for {len(credits_exhausted)} {name} due to credit exhaustion. Please consider upgrading your plan by visiting https://rapidapi.com/Chetan11dev/api/google-scraper/pricing to continue scraping data.")
if not_subscribed:
name = "queries" if len(not_subscribed) > 1 else "query"
print(f"Could not get data for {len(not_subscribed)} {name} as you are not subscribed to Google Scraper API. Please subscribe to a free plan by visiting https://rapidapi.com/Chetan11dev/api/google-scraper/pricing")
if unknown_error:
name = "queries" if len(unknown_error) > 1 else "query"
print(f"Could not get data for {len(unknown_error)} {name} due to Unknown Error.")
if no_key:
name = "queries" if len(no_key) > 1 else "query"
print(f"Could not get data for {len(no_key)} {name} as you are not subscribed to Google Scraper API. Please subscribe to a free plan by visiting https://rapidapi.com/Chetan11dev/api/google-scraper/pricing")
class Google:
@staticmethod
| def search(query: Union[str, List[str]], max: Optional[int] = None, key: Optional[str] =None, use_cache: bool = True) -> Dict: |
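The while-loop in `search` above is ordinary cursor pagination: follow `next` until it is empty or the item cap is reached. A generic, self-contained sketch of the same idea (`fetch_page` is a hypothetical callable, not part of the repository):

def paginate(fetch_page, first_cursor, max_items=None):
    """Accumulate results by following `next` cursors until exhausted or capped."""
    results, cursor = [], first_cursor
    while cursor and (max_items is None or len(results) < max_items):
        page = fetch_page(cursor)  # expected to return {"results": [...], "next": ...}
        results.extend(page.get("results", []))
        cursor = page.get("next")
    return results if max_items is None else results[:max_items]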
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AI2lab/comfyUI-tool-2lab
# Path: nodes/common/utils.py
def downloadFileToTempFolder(url: str) -> str:
try:
response = requests.get(url)
response.raise_for_status()
try:
if not os.path.exists(temp_folder):
os.makedirs(temp_folder)
except Exception as e:
print(f"Failed to create directory '{temp_folder}'. Error: {e}")
return None
# temp file name
ext = getFileNameExt(url)
curtime = str(int(time.time()))
filename = curtime
if ext != "":
filename = curtime+"."+ext
file_path = os.path.join(temp_folder,filename)
except:
return ''
return file_path
# Path: nodes/constants.py
def get_project_name(name):
return '{} ({})'.format(name, PROJECT_NAME)
# Path: nodes/constants.py
def get_project_category(sub_dirs = None):
start = "🦊" + PROJECT_NAME
if sub_dirs is None:
return start
else:
return "{}/{}".format(start,sub_dirs)
# Path: nodes/tool/preview.py
import numpy as np
import torch
from PIL import Image
from ..common.utils import downloadFileToTempFolder
from ..constants import get_project_name, get_project_category
NODE_CATEGORY = get_project_category("util/preview")
class ShowText:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"string": ("STRING", {"forceInput": True}),
},
"hidden": {
"unique_id": "UNIQUE_ID",
"extra_pnginfo": "EXTRA_PNGINFO",},
}
NAME = get_project_name('show_text')
CATEGORY = NODE_CATEGORY
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("string",)
OUTPUT_NODE = True
FUNCTION = "doWork"
def doWork(self, string, unique_id=None, extra_pnginfo=None):
return {"ui": {"string": [string, ]}, "result": (string,)}
class ShowWebImage:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"image_url": ("STRING", {"multiline": False}),
"RGBA": (["false", "true"],{"default":False}),
},
}
NAME = get_project_name('show_web_image')
CATEGORY = NODE_CATEGORY
RETURN_TYPES = ("IMAGE", "MASK","TEXT","filePath")
RETURN_NAMES = ("image", "mask","image_url","filePath")
OUTPUT_NODE = True
FUNCTION = "doWork"
def doWork(self, image_url, RGBA):
print(image_url)
i = None
file_path = ''
try:
if image_url.startswith('http'):
file_path,i = self.download_image(image_url)
else:
file_path = image_url
i = Image.open(image_url)
if not i:
return
image = i
if not RGBA:
image = image.convert('RGB')
image = np.array(image).astype(np.float32) / 255.0
image = torch.from_numpy(image)[None,]
# RGBA - mask
if 'A' in i.getbands():
mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
mask = 1. - torch.from_numpy(mask)
else:
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
return (image, mask, image_url,file_path)
except Exception:
pass
return (None, None, image_url,file_path)
def download_image(self, url):
| file_path = downloadFileToTempFolder(url) |
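`ShowWebImage.doWork` above performs a standard PIL-to-tensor conversion: normalize to [0, 1], add a batch dimension, and invert the alpha channel to get a mask. The same steps in isolation (a sketch assuming Pillow, numpy, and torch are installed):

import numpy as np
import torch
from PIL import Image

i = Image.new("RGBA", (64, 64), (255, 0, 0, 128))     # toy RGBA image
image = torch.from_numpy(np.array(i.convert("RGB")).astype(np.float32) / 255.0)[None,]
if "A" in i.getbands():
    mask = 1.0 - torch.from_numpy(np.array(i.getchannel("A")).astype(np.float32) / 255.0)
else:
    mask = torch.zeros((64, 64), dtype=torch.float32)
assert image.shape == (1, 64, 64, 3) and mask.shape == (64, 64)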
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Amirtheahmed/ddd-cqrs-fastapi
# Path: src/contexts/photostore/photo/domain/PhotoRepository.py
class PhotoRepository(ABC):
async def create_one(self, photo: Photo) -> NoReturn:
raise NotImplementedError()
# Path: src/contexts/photostore/photo/domain/entities/Photo.py
class Photo(AggregateRoot):
def __init__(
self,
photo_id: PhotoId,
name: PhotoName,
user_id: UserId,
file: PhotoFile,
tags: PhotoTags,
):
super().__init__()
self.id = photo_id
self.name = name
self.user_id = user_id
self.file = file
self.tags = tags
@staticmethod
def create(photo_id: PhotoId, name: PhotoName, user_id: UserId, file: PhotoFile, tags: PhotoTags):
photo = Photo(photo_id, name, user_id, file, tags)
event = PhotoCreatedDomainEvent(photo.id.value(), photo_id, user_id, name, tags)
photo.record_event(event)
return photo
@staticmethod
def create_from_primitives(raw_data: Dict[str, Any]):
photo = Photo(
PhotoId(raw_data.get('id')),
PhotoName(raw_data.get('name')),
UserId(raw_data.get('user-id')),
PhotoFile(raw_data.get('file')),
PhotoTags([PhotoTag(tag) for tag in raw_data.get('tags', default=[])]),
)
return photo
def to_primitives(self) -> Union[Dict, List]:
return {
'id': self.id.value(),
'name': self.name.value(),
'user-id': self.user_id.value(),
'tags': self.tags.values(),
}
# Path: src/contexts/photostore/photo/domain/entities/PhotoFile.py
class PhotoFile(ValueObject):
def __init__(self, content: bytes):
super().__init__(content)
# Path: src/contexts/photostore/photo/domain/entities/PhotoId.py
class PhotoId(ValueObject):
def __init__(self, value: str):
super().__init__(value)
if not Uuid.is_valid_uuid(value):
raise ValueObjectValidationError(f'PhotoId must be UUID V4. <{value}> found.')
# Path: src/contexts/photostore/photo/domain/entities/PhotoName.py
class PhotoName(ValueObject):
def __init__(self, value: str):
super().__init__(value)
# Path: src/contexts/photostore/photo/domain/entities/UserId.py
class UserId(ValueObject):
def __init__(self, value: str):
super().__init__(value)
# Path: src/contexts/shared/domain/EventBus.py
class EventBus(Interface):
@abstractmethod
async def publish(self, events: List[DomainEvent]):
raise NotImplementedError()
@abstractmethod
def add_subscribers(self, subscribers: List[EventSubscriber]):
raise NotImplementedError()
@abstractmethod
def start(self):
raise NotImplementedError()
# Path: src/contexts/photostore/photo/application/createone/PhotoCreator.py
from src.contexts.photostore.photo.domain.PhotoRepository import PhotoRepository
from src.contexts.photostore.photo.domain.entities.Photo import Photo
from src.contexts.photostore.photo.domain.entities.PhotoFile import PhotoFile
from src.contexts.photostore.photo.domain.entities.PhotoId import PhotoId
from src.contexts.photostore.photo.domain.entities.PhotoName import PhotoName
from src.contexts.photostore.photo.domain.entities.UserId import UserId
from src.contexts.shared.domain.EventBus import EventBus
class PhotoCreator:
def __init__(self, photo_repository: PhotoRepository, event_bus: EventBus):
self.__photo_repository = photo_repository
self.__event_bus = event_bus
| async def run(self, photo_id: PhotoId, name: PhotoName, user_id: UserId, file: PhotoFile): |
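`PhotoId` above delegates to a `Uuid.is_valid_uuid` helper that is not shown. A rough stand-in for that check, using only the standard library (an assumption about its behavior, not the repository's definition):

import uuid

def is_valid_uuid(value: str) -> bool:
    """Hypothetical stand-in: accept only canonical UUID v4 strings."""
    try:
        return str(uuid.UUID(value, version=4)) == value.lower()
    except (AttributeError, TypeError, ValueError):
        return False

assert is_valid_uuid(str(uuid.uuid4()))
assert not is_valid_uuid("not-a-uuid")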
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: JINO-ROHIT/RAG-with-Memory
# Path: vlite_db/model.py
class EmbeddingModel:
'''
EmbeddingModel runs a transformer model and returns the embedding for a given text.
'''
def __init__(self, model_name='sentence-transformers/all-MiniLM-L6-v2'):
self.tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) # use_fast=True
self.model = AutoModel.from_pretrained(model_name)
self.dimension = self.model.embeddings.position_embeddings.embedding_dim
self.max_seq_length = self.model.embeddings.position_embeddings.num_embeddings
#print("Tokenizer:", self.tokenizer)
# print("Dimension:", self.dimension)
# print("Max sequence length:", self.max_seq_length)
def embed(self, texts, max_seq_length=256, device="mps"):
if torch.backends.mps.is_available():
dev = torch.device("mps")
else:
dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = torch.device(dev) # Create a torch.device object
print("Device:", device)
self.model.to(device) # Move the model to the specified device
encoded_input = self.tokenizer(texts, padding=True, truncation=True, return_tensors='pt', max_length=max_seq_length)
print("Encoded input done",encoded_input['input_ids'].shape)
if encoded_input['input_ids'].shape[0] > 1300:
print("Encoded input too large, defaulting to CPU")
device = torch.device("cpu")
self.model.to(device) # Move the model to the specified device
encoded_input = {name: tensor.to(device) for name, tensor in encoded_input.items()} # Move all input tensors to the specified device
print("Encoded input moved to device")
with torch.no_grad():
model_output = self.model(**encoded_input)
embeddings = mean_pooling(model_output, encoded_input['attention_mask'], device=device)
tensor_embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
np_embeddings = tensor_embeddings.cpu().numpy() # Move tensor to CPU before converting to numpy
return np_embeddings
def token_count(self, texts):
tokens = 0
for text in texts:
tokens += len(self.tokenizer.tokenize(text))
return tokens
# Path: vlite_db/utils.py
def chop_and_chunk(text, max_seq_length=1024):
"""
Chop and chunk a text into smaller pieces of text.
Args:
text: string, list of strings, or array of strings
max_seq_length: maximum length of the text
"""
chunks = []
chunk = ''
for tokens in text.split(' '):
count = 0
chunk += tokens + ' '
if len(chunk) > max_seq_length:
chunks.append(chunk)
chunk = ''
return chunks
# Path: vlite_db/utils.py
def cos_sim(a, b):
sims = a @ b.T
sims /= np.linalg.norm(a) * np.linalg.norm(b, axis=1)
return sims
# Path: vlite_db/main.py
import numpy as np
import datetime
from uuid import uuid4
from .model import EmbeddingModel
from .utils import chop_and_chunk, cos_sim
class VLite:
'''
vlite is a simple vector database that stores vectors in a numpy array.
'''
def __init__(self, collection=None,device='mps',model_name=None):
# Filename must be unique between runs. Saving to the same file will append vectors to previous run's vectors
if collection is None:
current_datetime = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
collection = f"vlite_{current_datetime}.npz"
self.collection = collection
self.device = device
self.model = EmbeddingModel() if model_name is None else EmbeddingModel(model_name)
try:
with np.load(self.collection, allow_pickle=True) as data:
self.texts = data['texts'].tolist()
self.metadata = data['metadata'].tolist()
self.vectors = data['vectors']
except FileNotFoundError:
self.texts = []
self.metadata = {}
self.vectors = np.empty((0, self.model.dimension))
def add_vector(self, vector):
self.vectors = np.vstack((self.vectors, vector))
def get_similar_vectors(self, vector, top_k=5):
sims = cos_sim(vector, self.vectors)
sims = sims[0]
# print("[get_similar_vectors] Sims:", sims.shape)
top_k_idx = np.argsort(sims)[::-1][:top_k]
# print("[get_similar_vectors] Top k idx:", top_k_idx)
# print("[get_similar_vectors] Top k sims:", sims[top_k_idx])
return top_k_idx, sims[top_k_idx]
def memorize(self, text, id=None, metadata=None):
id = id or str(uuid4())
| chunks = chop_and_chunk(text) |
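The retrieval half of this store is just `cos_sim` plus a top-k argsort, as in `get_similar_vectors` above. The same pattern in a few lines of numpy:

import numpy as np

vectors = np.random.rand(10, 384)                    # stored embeddings
query = np.random.rand(1, 384)                       # query embedding
sims = (query @ vectors.T) / (np.linalg.norm(query) * np.linalg.norm(vectors, axis=1))
top_k = np.argsort(sims[0])[::-1][:5]                # indices of the 5 closest entries
print(top_k, sims[0][top_k])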
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: avataar/bg_electricity_regulated_pricing
# Path: custom_components/bg_electricity_regulated_pricing/const.py
CONF_TARIFF_TYPE = "tariff_type"
# Path: custom_components/bg_electricity_regulated_pricing/const.py
CONF_PROVIDER = "provider"
# Path: custom_components/bg_electricity_regulated_pricing/const.py
CONF_CUSTOM_DAY_PRICE = "custom_day_price"
# Path: custom_components/bg_electricity_regulated_pricing/const.py
CONF_CUSTOM_NIGHT_PRICE = "custom_night_price"
# Path: custom_components/bg_electricity_regulated_pricing/const.py
PROVIDER_PRICES = {
# Section 6.1, https://www.dker.bg/uploads/reshenia/2023/res_c_14_23.pdf
"electrohold": {
"day": .14875,
"night": .05997,
"fees": .01623 + .00754 + .04232
},
# Section 6.1, https://www.dker.bg/uploads/reshenia/2023/res_c_14_23.pdf
"evn": {
"day": .14667,
"night": .05531,
"fees": .01623 + .00803 + .04366
},
# Section 6.3, https://www.dker.bg/uploads/reshenia/2023/res_c_14_23.pdf
"energo_pro": {
"day": .15076,
"night": .05279,
"fees": .01623 + .00959 + .04825
}
}
# Path: custom_components/bg_electricity_regulated_pricing/const.py
CONF_CLOCK_OFFSET = "clock_offset"
# Path: custom_components/bg_electricity_regulated_pricing/const.py
BGN_PER_KILOWATT_HOUR = f"BGN/{UnitOfEnergy.KILO_WATT_HOUR}"
# Path: custom_components/bg_electricity_regulated_pricing/const.py
VAT_RATE = 0.2
# Path: custom_components/bg_electricity_regulated_pricing/const.py
DOMAIN = "bg_electricity_regulated_pricing"
# Path: custom_components/bg_electricity_regulated_pricing/sensor.py
from homeassistant.components.sensor import SensorEntity, SensorEntityDescription, \
SensorStateClass
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util import utcnow
from homeassistant.helpers.device_registry import DeviceEntryType, DeviceInfo
from .const import CONF_TARIFF_TYPE, CONF_PROVIDER, CONF_CUSTOM_DAY_PRICE, \
CONF_CUSTOM_NIGHT_PRICE, PROVIDER_PRICES, CONF_CLOCK_OFFSET, \
BGN_PER_KILOWATT_HOUR, VAT_RATE, DOMAIN
"""Sensor platform for bg_electricity_regulated_pricing integration."""
from __future__ import annotations
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Initialize bg_electricity_regulated_pricing config entry."""
name = config_entry.title
unique_id = config_entry.entry_id
tariff_type = config_entry.options[CONF_TARIFF_TYPE]
clock_offset = config_entry.options[CONF_CLOCK_OFFSET]
provider = config_entry.options[CONF_PROVIDER]
if provider == "custom":
| price_day = config_entry.options[CONF_CUSTOM_DAY_PRICE] |
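Downstream of this setup, the sensor presumably combines the tariff price with the fixed fees and VAT. A sketch of that arithmetic using the electrohold figures from `PROVIDER_PRICES` above (the exact combination is an assumption, not the integration's verbatim code):

VAT_RATE = 0.2
day, fees = 0.14875, 0.01623 + 0.00754 + 0.04232     # electrohold day price and fees
price_incl_vat = (day + fees) * (1 + VAT_RATE)       # BGN/kWh including 20% VAT
print(round(price_incl_vat, 5))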
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Qazalbash/jaxtro
# Path: jaxtro/utils/parser.py
def parse_config(config_path: str) -> dict:
# Path: jaxtro/utils/popgen.py
class PopulationGenerator:
"""Class to generate population and save them to disk."""
def __init__(self, general: dict, models: dict) -> None:
"""__init__ method for PopulationGenerator.
Parameters
----------
config : dict
Configuration dictionary for PopulationGenerator.
"""
self.check_general(general)
for model in models:
self.check_models(model)
self._size: int = general["size"]
self._error_scale: float = general["error_scale"]
self._error_size: int = general["error_size"]
self._root_container: str = general["root_container"]
self._event_filename: str = general["event_filename"]
self._config_filename: str = general["config_filename"]
self._models: list[ContinuousRV] = models
@staticmethod
def check_general(general: dict) -> None:
"""Check if all the required configs are present."""
assert general.get("size", None) is not None
assert general.get("error_scale", None) is not None
assert general.get("error_size", None) is not None
assert general.get("root_container", None) is not None
assert general.get("event_filename", None) is not None
assert general.get("config_filename", None) is not None
@staticmethod
def check_models(model: dict) -> None:
"""Check if all the required configs are present."""
assert model.get("model", None) is not None
assert model.get("config_vars", None) is not None
assert model.get("col_names", None) is not None
assert model.get("params", None) is not None
def generate(self):
"""Generate population and save them to disk."""
os.makedirs(self._root_container, exist_ok=True)
container = f"{self._root_container}"
os.makedirs(container, exist_ok=True)
config_vals = []
col_names = []
realisations = np.empty((self._size, 0))
for model in self._models:
model_instance: ContinuousRV = eval(model["model"])(**model["params"])
rvs = model_instance.rvs(self._size)
realisations = jnp.concatenate((realisations, rvs), axis=1)
config_vals.extend([(x, model["params"][x]) for x in model["config_vars"]])
col_names.extend(model["col_names"])
dump_configurations(
f"{container}/{self._config_filename}",
*config_vals,
)
for event_num, realisation in tqdm(enumerate(realisations),
desc=f"Generating events",
total=self._size,
unit=" events",
unit_scale=True):
filename = f"{container}/{self._event_filename.format(event_num)}"
realisation_err = add_normal_error(
*realisation,
scale=self._error_scale,
size=self._error_size,
)
np.savetxt(
filename,
realisation_err,
header="\t".join(col_names),
)
# Path: jaxtro/main.py
from .utils import PopulationGenerator, parser
# Copyright 2023 The Jaxtro Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def main():
args = parser.cmd_parser.parse_args()
configuration_dict = parser.parse_config(args.my_config)
general = configuration_dict['general']
models = [configuration_dict.get('mass_model', None), configuration_dict.get('spin_model', None)]
| pg = PopulationGenerator(general=general, models=models) |
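`add_normal_error` is referenced by `PopulationGenerator.generate` but not shown. Noise injection of this kind is typically a per-value Gaussian jitter; a hypothetical definition consistent with the call site (`*realisation, scale=..., size=...`):

import numpy as np

def add_normal_error(*values, scale=0.1, size=100):
    """Hypothetical: draw `size` noisy samples around each true value, one column per value."""
    rng = np.random.default_rng()
    return np.column_stack([rng.normal(v, scale, size) for v in values])

print(add_normal_error(30.0, 0.5, scale=0.05, size=4))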
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: smonsays/modular-hyperteacher
# Path: metax/data/base.py
class Dataset(NamedTuple):
x: Array
y: Array
info: Dict = dict()
# Path: metax/data/utils.py
def batch_generator(rng, datastruct, steps, batch_size):
"""
Add leading dims to datastruct resulting in (steps, batch_size, *data.shape).
If batch_size is None, repeat each data leaf, otherwise sample random batches.
"""
if batch_size is None or batch_size < 1:
# Repeat whole data on new leading dim for number of steps
def repeat(x):
return jnp.repeat(jnp.expand_dims(x, axis=0), steps, axis=0)
return jtu.tree_map(repeat, datastruct)
else:
rng_batch = jax.random.split(rng, steps)
batch_get_batch = jax.vmap(get_batch, in_axes=(0, None, None))
return batch_get_batch(rng_batch, datastruct, batch_size)
# Path: metax/module/init.py
class LearnedInit(MetaModule):
def __init__(self, loss_fn_inner, loss_fn_outer, base_learner, reg_strength):
super().__init__(loss_fn_inner, loss_fn_outer)
self.base_learner = base_learner
if reg_strength is not None:
# Use iMAML regularizer towards meta-learned init
key_map = {"base_learner": "base_learner_init"}
self.loss_fn_inner += energy.iMAML(
reg_strength=reg_strength,
key_map=key_map,
reduction="sum"
)
def __call__(self, rng, state, hstate, params, hparams, input, is_training):
output, state = self.base_learner.apply(
params.base_learner, state.base_learner, rng, input, is_training
)
return output, (LearnedInitState(state), hstate)
def reset_hparams(self, rng, sample_input):
params_base_learner, _ = self.base_learner.init(rng, sample_input, is_training=True)
# Re-using params container here to simplify implementation of reptile
return LearnedInitMetaParams(params_base_learner), LearnedInitMetaState()
def reset_params(self, rng, hparams, hstate, sample_input):
_, state_base_learner = self.base_learner.init(rng, sample_input, is_training=True)
return LearnedInitParams(hparams.base_learner_init), LearnedInitState(state_base_learner)
# Path: metax/module/init.py
class LearnedInitMetaParams(NamedTuple):
base_learner_init: Dict
# Path: metax/utils/utils.py
def append_keys(dictionary, suffix):
return {key + "_" + suffix: value for key, value in dictionary.items()}
# Path: metax/learner/base.py
class MetaGradLearner(MetaLearnerInnerGradientDescent):
"""
Abstract base class for meta-learning algorithms that estimate the meta-gradient.
"""
def __init__(
self,
meta_model: MetaModule,
batch_size: int,
steps_inner: int,
optim_fn_inner: optax.GradientTransformation,
optim_fn_outer: optax.GradientTransformation,
):
super().__init__(meta_model, batch_size, steps_inner, optim_fn_inner)
self.optim_fn_outer = optim_fn_outer
self.batch_grad = jax.vmap(self.grad, in_axes=(0, None, None, 0))
@abc.abstractmethod
def grad(
self, rng: chex.PRNGKey, hstate: HState, hparams: HParams, metadataset: data.MetaDataset
) -> Tuple[chex.Array, HState, Dict]:
pass
def update(self, rng, meta_state, metadataset: data.MetaDataset):
rng_batch = jax.random.split(rng, len(metadataset.train.x))
hgrads, hstate, metrics = self.batch_grad(
rng_batch, meta_state.hstate, meta_state.hparams, metadataset
)
hgrads = jtu.tree_map(partial(jnp.mean, axis=0), hgrads) # Average hgrads across tasks
hparams_update, optim_state = self.optim_fn_outer.update(
hgrads, meta_state.optim, meta_state.hparams
)
hparams = optax.apply_updates(meta_state.hparams, hparams_update)
# HACK: Averaging over the model state might result in unexpected behaviour
# HACK: Averaging might change dtype (e.g. int to float), this simply casts it back
hstate_dtypes = jtu.tree_map(jnp.dtype, hstate)
hstate = jtu.tree_map(partial(jnp.mean, axis=0), hstate)
hstate = jtu.tree_map(jax.lax.convert_element_type, hstate, hstate_dtypes)
metrics = jtu.tree_map(partial(jnp.mean, axis=0), metrics)
return MetaLearnerState(hparams=hparams, optim=optim_state, hstate=hstate), metrics
# Path: metax/learner/reptile.py
import jax
import jax.numpy as jnp
import jax.tree_util as jtu
import optax
from metax.data import Dataset, batch_generator
from metax.module import LearnedInit
from metax.module.init import LearnedInitMetaParams
from metax.utils import append_keys
from .base import MetaGradLearner
"""
Copyright (c) Simon Schug
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
| class Reptile(MetaGradLearner): |
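Reptile's outer step amounts to "move the meta-init toward the task-adapted weights". A minimal numpy sketch of that pseudo-gradient, independent of the metax classes:

import numpy as np

theta = np.zeros(3)                        # meta-learned init
adapted = np.array([[1.0, 2.0, 0.0],       # per-task weights after the inner loop
                    [3.0, 0.0, 1.0]])
meta_grad = theta - adapted.mean(axis=0)   # Reptile pseudo-gradient: theta - mean(phi)
theta = theta - 0.1 * meta_grad            # equivalently: theta += lr * (mean(phi) - theta)
print(theta)                               # -> [0.2  0.1  0.05]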
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AContesini/Convert_PDF_to_DOCX_or_vice-versa
# Path: venv/Lib/site-packages/tqdm/auto.py
class tqdm(notebook_tqdm, asyncio_tqdm): # pylint: disable=inconsistent-mro
pass
# Path: venv/Lib/site-packages/tqdm/std.py
class TqdmWarning(Warning):
"""base class for all tqdm warnings.
Used for non-external-code-breaking errors, such as garbled printing.
"""
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\n" + self.__class__.__name__ + ": " + str(msg).rstrip() + '\n')
else:
super(TqdmWarning, self).__init__(msg, *a, **k)
# Path: venv/Lib/site-packages/tqdm/contrib/concurrent.py
from contextlib import contextmanager
from operator import length_hint
from os import cpu_count
from ..auto import tqdm as tqdm_auto
from ..std import TqdmWarning
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import ProcessPoolExecutor
from warnings import warn
"""
Thin wrappers around `concurrent.futures`.
"""
__author__ = {"github.com/": ["casperdcl"]}
__all__ = ['thread_map', 'process_map']
@contextmanager
def ensure_lock(tqdm_class, lock_name=""):
"""get (create if necessary) and then restore `tqdm_class`'s lock"""
old_lock = getattr(tqdm_class, '_lock', None) # don't create a new lock
lock = old_lock or tqdm_class.get_lock() # maybe create a new lock
lock = getattr(lock, lock_name, lock) # maybe subtype
tqdm_class.set_lock(lock)
yield lock
if old_lock is None:
del tqdm_class._lock
else:
tqdm_class.set_lock(old_lock)
def _executor_map(PoolExecutor, fn, *iterables, **tqdm_kwargs):
"""
Implementation of `thread_map` and `process_map`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm].
max_workers : [default: min(32, cpu_count() + 4)].
chunksize : [default: 1].
lock_name : [default: "":str].
"""
kwargs = tqdm_kwargs.copy()
if "total" not in kwargs:
kwargs["total"] = length_hint(iterables[0])
tqdm_class = kwargs.pop("tqdm_class", tqdm_auto)
max_workers = kwargs.pop("max_workers", min(32, cpu_count() + 4))
chunksize = kwargs.pop("chunksize", 1)
lock_name = kwargs.pop("lock_name", "")
with ensure_lock(tqdm_class, lock_name=lock_name) as lk:
# share lock in case workers are already using `tqdm`
with PoolExecutor(max_workers=max_workers, initializer=tqdm_class.set_lock,
initargs=(lk,)) as ex:
return list(tqdm_class(ex.map(fn, *iterables, chunksize=chunksize), **kwargs))
def thread_map(fn, *iterables, **tqdm_kwargs):
"""
Equivalent of `list(map(fn, *iterables))`
driven by `concurrent.futures.ThreadPoolExecutor`.
Parameters
----------
tqdm_class : optional
`tqdm` class to use for bars [default: tqdm.auto.tqdm].
max_workers : int, optional
Maximum number of workers to spawn; passed to
`concurrent.futures.ThreadPoolExecutor.__init__`.
[default: max(32, cpu_count() + 4)].
"""
return _executor_map(ThreadPoolExecutor, fn, *iterables, **tqdm_kwargs)
def process_map(fn, *iterables, **tqdm_kwargs):
"""
Equivalent of `list(map(fn, *iterables))`
driven by `concurrent.futures.ProcessPoolExecutor`.
Parameters
----------
tqdm_class : optional
`tqdm` class to use for bars [default: tqdm.auto.tqdm].
max_workers : int, optional
Maximum number of workers to spawn; passed to
`concurrent.futures.ProcessPoolExecutor.__init__`.
[default: min(32, cpu_count() + 4)].
chunksize : int, optional
Size of chunks sent to worker processes; passed to
`concurrent.futures.ProcessPoolExecutor.map`. [default: 1].
lock_name : str, optional
Member of `tqdm_class.get_lock()` to use [default: mp_lock].
"""
if iterables and "chunksize" not in tqdm_kwargs:
# default `chunksize=1` has poor performance for large iterables
# (most time spent dispatching items to workers).
longest_iterable_len = max(map(length_hint, iterables))
if longest_iterable_len > 1000:
warn("Iterable length %d > 1000 but `chunksize` is not set."
" This may seriously degrade multiprocess performance."
" Set `chunksize=1` or more." % longest_iterable_len,
| TqdmWarning, stacklevel=2) |
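A usage sketch for the warning above: pass an explicit `chunksize` when mapping a cheap function over a large iterable, so dispatch overhead does not dominate.

from tqdm.contrib.concurrent import process_map

def square(x):
    return x * x

if __name__ == "__main__":                 # guard required by ProcessPoolExecutor on spawn platforms
    results = process_map(square, range(10_000), chunksize=64)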
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: willfinnigan/RetroBioCat_2
# Path: rbc2/configs/download_data_files/download_aizynthfinder.py
def does_aizynthfinder_exist() -> bool:
if not os.path.exists(f"{path_to_data_folder}/aizynthfinder/uspto_model.hdf5"):
return False
if not os.path.exists(f"{path_to_data_folder}/aizynthfinder/uspto_templates.hdf5"):
return False
return True
# Path: rbc2/configs/download_data_files/download_aizynthfinder.py
def download_aizynthfinder_model():
aizynthfinder_model = "https://figshare.com/ndownloader/files/23086454"
aizynthfinder_templates = "https://figshare.com/ndownloader/files/23086457"
# if aizynthfinder folder doesn't exist, create it with Pathlib
directory = f"{path_to_data_folder}/aizynthfinder"
Path(directory).mkdir(parents=True, exist_ok=True)
filename = "uspto_model.hdf5"
filepath = f"{directory}/{filename}"
download_file(aizynthfinder_model, filepath)
filename = "uspto_templates.hdf5"
filepath = f"{directory}/{filename}"
download_file(aizynthfinder_templates, filepath)
# Path: rbc2/utils/add_logger.py
def add_logger(name, level='DEBUG'):
logger = logging.getLogger(name)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(level)
logger.propagate = False
return logger
# Path: rbc2/configs/data_path.py
DEFAULT_DATA_FOLDER = str(Path(__file__).parents[1]) + '/data'
RBC2_DATA_PATH = os.getenv('RBC2_DATA_PATH')
# Path: rbc2/configs/expansion_config.py
class Expansion_Config():
def __init__(self):
# rule application
self.allow_chiral_symmetry = False
self.check_chiral_products = True
self.combine_enantiomers = True
self.allow_cyclic_reaction_outcomes = False
self.clean_brackets = True
# reaction parsing
self.allow_backwards = False
self.allow_duplicates = False
self.duplicates_require_same_expander = True
self.duplicates_require_same_domain = False
self.duplicates_require_same_name = False
self.merge_duplicate_metadata = True
self.force_rdkit_smis = True
# expanders general
self.max_reactions = None # max reactions (not options)
# reaction filtering and blocking
self.use_max_mw_for_enzymes = False
self.max_mw_to_use_enzymes = 300
def update_from_dict(self, attr_dict):
current_dict = self.to_dict()
for key, value in attr_dict.items():
if key in current_dict:
setattr(self, key, value)
return self
def to_dict(self):
return self.__dict__
# Path: rbc2/utils/load_keras_models.py
def tensorflow_imports():
def __init__(self, filename):
def __len__(self):
def predict(self, *args: np.ndarray, **_: np.ndarray):
CUSTOM_OBJECTS = {"top10_acc": top10_acc, "top50_acc": top50_acc}
class LocalKerasModel:
# Path: rbc2/utils/fingerprints.py
def get_mol_fingerprint(rd_mol, radius=2, nBits=2048):
def get_reaction_fingerprint(product_mol, substrate_mols, radius=2, nBits=2048):
# Path: rbc2/expansion/expanders/action_getters/aizynthfinder/aizynthfinder_actions.py
import time
import numpy as np
import pandas as pd
from rdkit import Chem
from rbc2.configs.download_data_files.download_aizynthfinder import does_aizynthfinder_exist, \
download_aizynthfinder_model
from rbc2.utils.add_logger import add_logger
from rbc2.configs.data_path import path_to_data_folder
from rbc2.configs.expansion_config import Expansion_Config
from rbc2.utils import load_keras_models, fingerprints
data_folder = f'{path_to_data_folder}/aizynthfinder'
class AizynthfinderActionGetter():
def __init__(self,
template_column='retro_template',
cutoff_cumulative=0.995,
cutoff_number=50,
log_level='WARNING'):
self.logger = add_logger('AIZynthfinder_Actions', level=log_level)
self.policy_model = None
self.templates = None
self.template_column = template_column
self.cutoff_cumulative = cutoff_cumulative
self.cutoff_number = cutoff_number
if does_aizynthfinder_exist() == False:
download_aizynthfinder_model()
def load_model(self):
if self.policy_model == None:
policy_path = data_folder + '/uspto_model.hdf5'
self.policy_model = load_keras_models.LocalKerasModel(policy_path)
if self.templates == None:
templates_path = data_folder + '/uspto_templates.hdf5'
self.templates = pd.read_hdf(templates_path, "table")
def get_actions(self, smi):
reactions = []
priors = []
template_column = self.template_column
mol = Chem.MolFromSmiles(smi)
all_transforms_prop = self._predict(mol)
probable_transforms_idx = self._cutoff_predictions(all_transforms_prop)
possible_moves = self.templates.iloc[probable_transforms_idx]
probs = all_transforms_prop[probable_transforms_idx]
priors.extend(probs)
for idx, (move_index, move) in enumerate(possible_moves.iterrows()):
metadata = dict(move)
del metadata[template_column]
metadata["policy_probability"] = round(float(probs[idx]), 5)
metadata["template_code"] = move_index
reaction = {'smarts': move[template_column],
'metadata': metadata,
'prior': priors[idx]}
reactions.append(reaction)
return reactions
def get_rxns(self, smile):
if self.policy_model == None:
self.load_model()
reactions = self.get_actions(smile)
rxns = {}
metadata = {}
for reaction in reactions:
name = f"Chem_{reaction['metadata']['classification']}"
num = 1
extra_string = f"__{num}"
while name+extra_string in rxns:
extra_string = f"__{num}"
num += 1
name = name+extra_string
smarts = reaction['smarts']
if self._does_smarts_only_one_reactants(smarts):
rxns[name] = [smarts]
else:
rxns[name] = []
metadata[name] = reaction['metadata']
return rxns, metadata
def _predict(self, mol):
| fingerprint = fingerprints.get_mol_fingerprint(mol, 2, nBits=len(self.policy_model)) |
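`_cutoff_predictions`, used by `get_actions` above, is not shown; in AiZynthFinder-style policies it conventionally keeps the most probable templates up to a cumulative-probability and count budget. A hedged numpy sketch of that idea:

import numpy as np

def cutoff_predictions(probs, cutoff_cumulative=0.995, cutoff_number=50):
    """Hypothetical: indices of top templates within cumulative and count budgets."""
    order = np.argsort(probs)[::-1]
    keep = np.cumsum(probs[order]) <= cutoff_cumulative
    keep[0] = True                          # always keep at least the best template
    return order[keep][:cutoff_number]

print(cutoff_predictions(np.array([0.6, 0.3, 0.05, 0.05])))  # -> [0 1 2]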
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: DomingoJoseCab/AutoTube
# Path: utils/edition/autoediting.py
def load_videos(videos_path):
video_list = []
videos = os.listdir(videos_path)
for vid in videos:
video = VideoFileClip(os.path.join(videos_path,vid))
video_list.append(video)
return video_list
# Path: utils/edition/autoediting.py
def load_audio(audio_path):
audio_list = []
audios = os.listdir(audio_path)
for au in audios:
audio = AudioFileClip(os.path.join(audio_path,au))
audio_list.append(audio)
return audio_list
# Path: utils/edition/autoediting.py
def generate_product(video, audio):
ordered_clips = generate_subclip(video)
repetitions = ceil(audio.duration / sum(clip.duration for clip in ordered_clips))
final_clips_sequence = ordered_clips * repetitions
final_clips_sequence = concatenate_videoclips(final_clips_sequence).subclip(0, audio.duration+1)
final_video = final_clips_sequence.set_audio(CompositeAudioClip([audio.set_start(0.5)]))
return final_video
# Path: utils/edition/autoediting.py
def generate_intro(videos, audio):
selected_video = choice(videos)
audio_duration = audio.duration
total_video_duration = audio_duration + 1
start_time = choice(range(int(selected_video.duration - total_video_duration)))
video_clip = selected_video.subclip(start_time, start_time + total_video_duration)
adjusted_audio = CompositeAudioClip([audio.set_start(0.5)])
video_clip = video_clip.set_audio(adjusted_audio)
return video_clip
# Path: utils/edition/autoediting.py
def generate_outro(videos, audio):
selected_video = choice(videos)
audio_duration = audio.duration
clips = generate_subclip(selected_video)
total_video_duration = audio_duration + 25
repetitions = ceil(total_video_duration / sum(clip.duration for clip in clips))
final_clips = clips * repetitions
final_clips = concatenate_videoclips(final_clips).subclip(0, total_video_duration)
adjusted_audio = CompositeAudioClip([audio.set_start(0.5)])
video_clip = final_clips.set_audio(adjusted_audio)
return video_clip
# Path: utils/edition/autotext.py
def title_intro(title:str, video):
texto = TextClip(title, fontsize=40, color='white', font='Bebas Neue Bold')
texto = texto.set_position('center').set_duration(6)
color_clip = ColorClip(video.size, color=(0, 0, 0), duration=texto.duration)
color_clip = color_clip.set_opacity(0.9) # Ajusta la opacidad
color_clip = color_clip.set_start(4)
texto = texto.set_start(4).crossfadein(1)
video_opaco = CompositeVideoClip([video, color_clip])
video_final = CompositeVideoClip([video_opaco, texto])
return video_final
# Path: utils/edition/edit.py
import os
import json
from moviepy.editor import CompositeVideoClip
from utils.edition.autoediting import load_videos, load_audio, generate_product, generate_intro, generate_outro
from utils.edition.autotext import title_intro
from moviepy.config import change_settings
# ==============================================================================
# AutoTube Script
# Created by: Domingo Caballero
# YouTube channel: https://www.youtube.com/@emprendedomingo?=sub_confirmation=1
# Mailing list: https://emprendecondomingo.substack.com/
# ==============================================================================
def main(videos_path, audios_path, output_path, names, base_path):
videos = load_videos(videos_path)
audios = load_audio(audios_path)
audio_intro = audios.pop(0)
audio_outro = audios.pop(-1)
| intro = generate_intro(videos, audio_intro)
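The core trick in `generate_product` above — tile short clips until they outlast the narration, trim, then attach the audio — works on any clip list. A sketch assuming moviepy is installed:

from math import ceil
from moviepy.editor import CompositeAudioClip, concatenate_videoclips

def cover_audio(clips, audio):
    """Tile `clips` until they outlast `audio`, trim, and attach the narration."""
    reps = ceil(audio.duration / sum(c.duration for c in clips))
    video = concatenate_videoclips(clips * reps).subclip(0, audio.duration + 1)
    return video.set_audio(CompositeAudioClip([audio.set_start(0.5)]))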
|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: gregorybchris/typogenetics
# Path: typogenetics/search.py
class Editor:
PROB_MUTATE = 0.80
PROB_INSERT = 0.10
PROB_DELETE = 0.10
@classmethod
def edit(cls, strand: Strand, rng: Generator) -> Strand:
edit_type = cls.select_edit_type(rng)
if edit_type == EditType.MUTATE:
return cls.mutate(strand, rng)
if edit_type == EditType.INSERT:
return cls.insert(strand, rng)
if edit_type == EditType.DELETE:
return cls.delete(strand, rng)
@classmethod
def mutate(cls, strand: Strand, rng: Generator) -> Strand:
r1 = rng.integers(0, len(strand))
new_bases = strand.bases.copy()
base = new_bases[r1]
while new_bases[r1] == base:
all_bases = [Base.A, Base.C, Base.G, Base.T]
r2 = rng.integers(0, len(all_bases))
new_bases[r1] = all_bases[r2]
return Strand(new_bases)
@classmethod
def insert(cls, strand: Strand, rng: Generator) -> Strand:
r1 = rng.integers(0, len(strand) + 1)
new_bases = strand.bases.copy()
all_bases = [Base.A, Base.C, Base.G, Base.T]
r2 = rng.integers(0, len(all_bases))
new_bases.insert(r1, all_bases[r2])
return Strand(new_bases)
@classmethod
def delete(cls, strand: Strand, rng: Generator) -> Strand:
r1 = rng.integers(0, len(strand))
new_bases = strand.bases.copy()
new_bases.pop(r1)
return Strand(new_bases)
@classmethod
def select_edit_type(cls, rng: Generator) -> EditType:
r = rng.random()
edit_types = [
(EditType.MUTATE, cls.PROB_MUTATE),
(EditType.INSERT, cls.PROB_INSERT),
(EditType.DELETE, cls.PROB_DELETE),
]
assert np.isclose(sum(dict(edit_types).values()), 1.0)
for edit_type, prob in edit_types:
if r <= prob:
return edit_type
r -= prob
raise ValueError("Random number is not in range [0, 1]")
# Path: typogenetics/search.py
class EditType(StrEnum):
MUTATE = auto()
INSERT = auto()
DELETE = auto()
# Path: typogenetics/typogenetics.py
class Strand:
bases: List[Base]
@classmethod
def from_str(cls, strand_str: str) -> "Strand":
bases = []
for base_str in strand_str:
if base_str == " ":
continue
base = Base.from_str(base_str)
bases.append(base)
return cls(bases)
def iter_bases(self) -> Iterator[Base]:
yield from self.bases
def iter_duplets(self) -> Iterator[Duplet]:
unit = 0
while True:
if unit + 1 >= len(self):
break
yield (self[unit], self[unit + 1])
unit += 2
def __repr__(self) -> str:
return "".join([str(b) for b in self.bases])
def __str__(self) -> str:
return self.__repr__()
def __getitem__(self, unit: int) -> Base:
return self.bases[unit]
def __len__(self) -> int:
return len(self.bases)
# Path: tests/test_search.py
import numpy as np
from typogenetics.search import Editor, EditType
from typogenetics.typogenetics import Strand
class TestSearch:
def test_select_edit_type(self) -> None:
rng = np.random.default_rng(42)
assert Editor.select_edit_type(rng) == EditType.INSERT
def test_mutate(self) -> None:
rng = np.random.default_rng(42)
| strand = Strand.from_str("ACGT") |
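The `mutate` idiom above — pick a random index, then resample until the value actually changes — in miniature (numpy only):

import numpy as np

rng = np.random.default_rng(0)
bases = list("ACGT")
i = rng.integers(0, len(bases))
old = bases[i]
while bases[i] == old:                     # resample until the base really changed
    bases[i] = "ACGT"[rng.integers(0, 4)]
print("".join(bases))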
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: chaoren2357/gsplatstudio
# Path: gsplatstudio/data/processor/base_processor.py
class BaseDataProcessor(ABC):
def __init__(self, cfg, logger, source_path) -> None:
self.cfg = parse_structured(self.config_class, cfg)
self.logger = logger
self.source_path_str = source_path
@property
@abstractmethod
def config_class(self):
pass
@property
def should_skip(self):
pass
@abstractmethod
def run(self):
pass
def run_command_with_realtime_output(self, cmd):
"""
Run the specified command and output the results in real-time.
:param cmd: The command string to run.
:return: The exit code of the command.
"""
self.logger.info(f"Running command: {cmd}")
# Start the process
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
# Read output in real-time
while True:
output = process.stdout.readline()
if output == '' and process.poll() is not None:
break
if output:
self.logger.verbose(output.strip())
# Read any remaining error output
stderr_output = process.stderr.read()
if stderr_output:
self.logger.error("Error Output:")
self.logger.error(stderr_output.strip())
# Return the exit code
return process.returncode
# Path: gsplatstudio/utils/general_utils.py
def load_json(json_file):
with open(json_file, 'r') as file:
return json.load(file)
# Path: gsplatstudio/utils/camera_utils.py
def transform_camera_from_carla_matrix_to_colmap_quaternion(camera_data):
x_carla,y_carla,z_carla,roll_carla,pitch_carla,yaw_carla = camera_data['x'],camera_data['y'],camera_data['z'],camera_data['roll'],camera_data['pitch'],camera_data['yaw']
x = y_carla
y = -z_carla
z = x_carla
roll = pitch_carla
pitch = yaw_carla
yaw = roll_carla
C2W_matrix = get_transform_matrix(x, y, z, pitch, roll, yaw)
W2C_matrix = np.linalg.inv(C2W_matrix)
W2C_quaternion = rotmat2qvec(W2C_matrix[:3, :3])
W2C_translation = W2C_matrix[:3, 3]
return W2C_quaternion, W2C_translation
# Path: gsplatstudio/utils/camera_utils.py
def fov_to_focal_length(fov_degrees, width):
fov_radians = np.radians(fov_degrees)
focal_length = (width / 2) / np.tan(fov_radians / 2)
return focal_length
# Path: gsplatstudio/data/processor/colmapWcam_processor.py
import gsplatstudio
import sqlite3
from gsplatstudio.utils.type_utils import *
from gsplatstudio.data.processor.base_processor import BaseDataProcessor
from pathlib import Path
from gsplatstudio.utils.general_utils import load_json
from gsplatstudio.utils.camera_utils import transform_camera_from_carla_matrix_to_colmap_quaternion, fov_to_focal_length
@dataclass
class ColmapWithCamProcessorConfig:
use_gpu: bool = True
camera: str = "OPENCV"
map_ba_global_function_tolerance: float = 0.000001
@gsplatstudio.register("colmap_with_cam-processor")
class ColmapWithCamProcessor(BaseDataProcessor):
def __init__(self, cfg, logger, source_path) -> None:
super().__init__(cfg, logger, source_path)
@property
def config_class(self):
return ColmapWithCamProcessorConfig
@property
def should_skip(self):
cameras_file = Path(self.source_path_str) / "sparse" / "0" / "cameras.bin"
images_file = Path(self.source_path_str) / "sparse" / "0" / "images.bin"
points3D_file = Path(self.source_path_str) / "sparse" / "0" / "points3D.bin"
return cameras_file.exists() and images_file.exists() and points3D_file.exists()
def run(self):
self.logger.info("Start running ColmapWithCamProcessorConfig...")
project_folder = Path(self.source_path_str) / "distorted"
project_folder.mkdir(parents=True, exist_ok=True)
database_path = Path(self.source_path_str) / "distorted" / "database.db"
image_distorted_folder = Path(self.source_path_str) / "input"
camera_folder = Path(self.source_path_str) / "camera"
## Feature extraction
feature_extractor_cmd = "colmap feature_extractor" + \
f" --database_path {str(database_path)}" + \
f" --image_path {str(image_distorted_folder)}" + \
f" --ImageReader.single_camera 1" + \
f" --ImageReader.camera_model {self.cfg.camera}" + \
f" --SiftExtraction.use_gpu {int(self.cfg.use_gpu)}"
exit_code = self.run_command_with_realtime_output(feature_extractor_cmd)
if exit_code != 0:
self.logger.error(f"Feature extraction failed with code {exit_code}. Exiting.")
exit(exit_code)
self.logger.info("Finish feature extraction...")
## Create points3D.txt
points3D_txt_path = project_folder / 'points3D.txt'
open(str(points3D_txt_path), 'w').close()
## Create camera.txt
camera_txt_path = project_folder / 'cameras.txt'
open(str(camera_txt_path), 'w').close()
unique_cameras = {}
camera_id = 1
for camera_file in camera_folder.glob('*.json'):
camera_data = load_json(camera_file)
intrinsics = camera_data['intrinsics']
| focal_length = fov_to_focal_length(intrinsics['fov'], intrinsics['width']) |
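The pinhole relation in fov_to_focal_length is easy to sanity-check: the focal length in pixels is half the image width divided by the tangent of half the FOV. A standalone check with illustrative values (not from the repository):
import numpy as np
def fov_to_focal_length(fov_degrees, width):
    fov_radians = np.radians(fov_degrees)
    return (width / 2) / np.tan(fov_radians / 2)
# For a 90-degree FOV, tan(45 deg) == 1, so the focal length equals half the width.
assert np.isclose(fov_to_focal_length(90, 800), 400.0)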
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ddjerqq/beam
# Path: src/types/user.py
class User:
id: int
username: str
avatar_url: str
# Path: src/types/video.py
class Video:
"""Tiktok video object"""
id: str
"""Unique identifier for the TikTok video. Also called "item_id"""
create_time: int
"""UTC Unix epoch (in seconds) of when the TikTok video was posted."""
cover_image_url: str
"""A CDN link for the video's cover image. The image is static. Due to our trust and safety policies, the link has a TTL of 6 hours."""
share_url: str
"""A shareable link for this TikTok video. Note that the website behaves differently on Mobile and Desktop devices."""
video_description: str
"""The description that the creator has set for the TikTok video. Max length: 150"""
duration: int
"""The duration of the TikTok video in seconds."""
height: int
"""The height of the TikTok video."""
width: int
"""The width of the TikTok video."""
title: str
"""The video title. Max length: 150"""
embed_html: str
"""HTML code for embedded video"""
embed_link: str
"""Video embed link of tiktok.com"""
like_count: int
"""Number of likes for the video"""
comment_count: int
"""Number of comments on the video"""
share_count: int
"""Number of shares of the video"""
view_count: int
"""Number of views of the video"""
@property
def create_timestamp(self) -> datetime.datetime:
return datetime.datetime.fromtimestamp(self.create_time, tz=datetime.UTC)
# Path: src/util.py
import os
import httpx
from src.types.user import User
from src.types.video import Video
def get_env(key: str, default: str = None) -> str:
"""
gets the environment variable with the given key,
or raises an exception if the default is not supplied.
"""
var = os.getenv(key, default)
if var is not None:
return var
raise Exception(f"Environment variable {key} not found.")
def humanize(num: int) -> str:
"""
converts a number to a human readable format.
"""
if num < 1000:
return str(num)
num = num / 1000
if num < 1000:
return f"{num:.1f}k"
num = num / 1000
if num < 1000:
return f"{num:.1f}m"
num = num / 1000
return f"{num:.1f}b"
| def video_info_to_webhook_payload(author: User, video: Video) -> dict[str, str]: |
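humanize divides by 1000 per magnitude step; a few illustrative checks of its thresholds (input values are made up):
from src.util import humanize  # module path as laid out above
assert humanize(999) == "999"
assert humanize(1234) == "1.2k"
assert humanize(5_600_000) == "5.6m"
assert humanize(2_100_000_000) == "2.1b"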
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: onestepai/api_rag
# Path: src/config/ServiceApiConfig.py
class ServiceApiConfig(ServiceApiConfigBase):
def __init__(self):
ServiceApiConfigBase.__init__(self,
url_prefix=DockerConfig.URL_PREFIX + DockerConfig.API_VERSION,
version=DockerConfig.API_VERSION,
title=DockerConfig.API_TITLE,
description=DockerConfig.API_DESCRIPTION,
gpt_api_key= DockerConfig.GPT_API_KEY,
gpt_4_model= DockerConfig.GPT_API_VERSION_4,
gpt_3_5_model=DockerConfig.GPT_API_VERSION_35,
prompt_language=DockerConfig.PROMPT_LANGUAGE
)
self.__set_predict_request()
self.__set_predict_response()
def __set_predict_request(self):
request = ServiceApiConfigBase.api.model('PredictRequest.extractResult', {
'utterance': fields.String(description='content'),
'model_name': fields.String(description='model name'),
'language': fields.String(description='language')
})
predict_request = ServiceApiConfigBase.api.model('PredictRequest', {
'requestId': fields.String(description='request id'),
'request': fields.Nested(request, description='request'),
'timestamp': fields.Integer(description='calling timestamp')
})
ServiceApiConfigBase.predict_request = predict_request
def __set_predict_response(self):
response_result = ServiceApiConfigBase.api.model('PredictResponse.responseResult', {
'result': fields.String(description='result'),
'content': fields.String(description='content')
})
predict_response = ServiceApiConfigBase.api.model('PredictResponse', {
'requestId': fields.String(description='request id'),
'responseResult': fields.Nested(response_result, description='responseResult'),
'timestamp': fields.Integer(description='calling timestamp')
})
ServiceApiConfigBase.predict_response = predict_response
# Path: src/config/DockerConfig.py
class DockerConfig(object):
GPT_API_KEY = MyEnvironment().get_environment_variable("OPENAPI_API_KEY", 'Your open ai key')
API_VERSION = MyEnvironment().get_environment_variable("API_VERSION", '1.0')
GPT_API_VERSION_35 = MyEnvironment().get_environment_variable("GPT_3.5", 'gpt-3.5-turbo-1106')
GPT_API_VERSION_4 = MyEnvironment().get_environment_variable("GPT_4", 'gpt-4-1106-preview')
URL_PREFIX = MyEnvironment().get_environment_variable("URL_PREFIX", '/api_rag/')
SERVICE_PORT = MyEnvironment().get_environment_variable("PORT", '5000')
API_TITLE = MyEnvironment().get_environment_variable("API_TITLE", 'API RAG Service')
API_DESCRIPTION = MyEnvironment().get_environment_variable("API_DESCRIPTION", 'API RAG Service')
PROMPT_LANGUAGE = MyEnvironment().get_environment_variable("PROMPT_LANGUAGE", "zh_cn")
# Path: src/api_rag/ModelHandler.py
class ModelHandler(ModelBaseHandler):
V1 = "v1"
def __init__(self, config):
ModelBaseHandler.__init__(self, config)
self._version = ModelHandler.V1
self.create_model()
def create_model(self):
if self._version == ModelHandler.V1:
self._predictor = APIRAGModel()
def predict(self, request, **kwargs):
# try:
LoggerHelper().log_info(u"Request: " + str(request))
contents = request["request"]["content"]
data = json.loads(contents)
if "clean_context" in list(data.keys()):
final_result = "Reset successfully."
else:
text = data["utterance"]
model_name = data["model_name"]
LoggerHelper().log_info(u"date_text_content: " + str(text))
final_result = self._predictor.predict(text,model_name)
response_predict = self.create_predict_response(request,final_result)
if response_predict is not None:
return response_predict
def create_predict_response(self, request, predict_sent):
response = {
'requestId': request['requestId'] if 'requestId' in request else '',
'timestamp': time.time(),
'response': predict_sent
}
return {
'requestId': request['requestId'] if 'requestId' in request else '',
'timestamp': time.time(),
'responseResult': {
'result': 'success',
'content': json.dumps(response, ensure_ascii=False)
}
}
# Path: service.py
import logging
from src.config.ServiceApiConfig import ServiceApiConfig
from src.config.DockerConfig import DockerConfig
from src.api_rag.ModelHandler import ModelHandler
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
if __name__ == '__main__':
| config = ServiceApiConfig() |
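Note that ModelHandler.predict reads request["request"]["content"] as a JSON string rather than the flat fields declared in the restx model, so a payload it accepts looks roughly like this (all values illustrative):
import json
payload = {
    "requestId": "req-001",  # illustrative id
    "request": {
        "content": json.dumps({"utterance": "hi", "model_name": "gpt-4-1106-preview"})
    },
    "timestamp": 1700000000,
}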
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: DerwenAI/textgraphs
# Path: textgraphs/elem.py
class Edge:
"""
A data class representing an edge between two nodes.
"""
src_node: int
dst_node: int
kind: RelEnum
rel: str
prob: float
count: int = 1
# Path: textgraphs/elem.py
class Node: # pylint: disable=R0902
"""
A data class representing one node, i.e., an extracted phrase.
"""
node_id: int
key: str
span: typing.Union[ spacy.tokens.span.Span, spacy.tokens.token.Token ]
text: str
pos: str
kind: NodeEnum
loc: typing.List[ typing.List[ int ] ] = field(default_factory = lambda: [])
label: typing.Optional[ str ] = None
length: int = 1
sub_obj: bool = False
count: int = 0
neighbors: int = 0
weight: float = 0.0
entity: typing.List[ LinkedEntity ] = field(default_factory = lambda: [])
annotated: bool = False
def get_linked_label (
self
) -> typing.Optional[ str ]:
"""
When this node has a linked entity, return that IRI.
Otherwise return its `label` value.
returns:
a label for the linked entity
"""
if len(self.entity) > 0:
return self.entity[0].iri
return self.label
def get_name (
self
) -> str:
"""
Return a brief name for the graphical depiction of this Node.
returns:
brief label to be used in a graph
"""
if self.kind == NodeEnum.IRI:
return self.label # type: ignore
if self.kind == NodeEnum.LEM:
return self.key
return self.text
def get_stacked_count (
self
) -> int:
"""
Return a modified count, to redact verbs and linked entities from
the stack-rank partitions.
returns:
count, used for re-ranking extracted entities
"""
if self.pos == "VERB" or self.kind == NodeEnum.IRI:
return 0
return self.count
def get_pos (
self
) -> typing.Tuple[ int, int ]:
"""
Generate a position span for `OpenNRE`.
returns:
a position span needed for `OpenNRE` relation extraction
"""
position: typing.Tuple[ int, int ] = ( self.span.idx, self.span.idx + len(self.text) - 1, )
return position
# Path: textgraphs/elem.py
class NodeEnum (enum.IntEnum):
"""
Enumeration for the kinds of node categories
"""
DEP = 0 # `spaCy` parse dependency
LEM = 1 # lemmatized token
ENT = 2 # named entity
CHU = 3 # noun chunk
IRI = 4 # IRI for linked entity
def __str__ (
self
) -> str:
"""
Codec for representing as a string.
returns:
decoded string representation of the enumerated value
"""
decoder: typing.List[ str ] = [
"dep",
"lem",
"ent",
"chu",
"iri",
]
return decoder[self.value]
# Path: textgraphs/elem.py
class RelEnum (enum.IntEnum):
"""
Enumeration for the kinds of edge relations
"""
DEP = 0 # `spaCy` parse dependency
CHU = 1 # `spaCy` noun chunk
INF = 2 # `REBEL` or `OpenNRE` inferred relation
SYN = 3 # `sense2vec` inferred synonym
IRI = 4 # `DBPedia` or `Wikidata` linked entity
def __str__ (
self
) -> str:
"""
Codec for representing as a string.
returns:
decoded string representation of the enumerated value
"""
decoder: typing.List[ str ] = [
"dep",
"chu",
"inf",
"syn",
"iri",
]
return decoder[self.value]
# Path: textgraphs/graph.py
from collections import OrderedDict
from icecream import ic # pylint: disable=E0401
from .elem import Edge, Node, NodeEnum, RelEnum
import json
import typing
import networkx as nx # pylint: disable=E0401
import spacy # pylint: disable=E0401
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This class implements a generic, in-memory graph data structure used
to represent the _lemma graph_.
see copyright/license https://huggingface.co/spaces/DerwenAI/textgraphs/blob/main/README.md
"""
######################################################################
## class definitions
class SimpleGraph:
"""
An in-memory graph used to build a `MultiDiGraph` in NetworkX.
"""
def __init__ (
self
) -> None:
"""
Constructor.
"""
self.nodes: typing.Dict[ str, Node ] = OrderedDict()
| self.edges: typing.Dict[ str, Edge ] = {} |
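A hedged sketch of registering an Edge in the structure above, assuming Edge carries the @dataclass decorator in the repository and guessing a key scheme (the rest of the class body is not shown here):
g = SimpleGraph()
e = Edge(src_node=0, dst_node=1, kind=RelEnum.DEP, rel="nsubj", prob=0.9)
g.edges["0.dep.1"] = e  # key format is a hypothetical placeholder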
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Noubissie237/StockManagment
# Path: StockManagment/App/utils.py
def panier_cookie(request):
articles = []
commande = {
'get_panier_total':0,
'get_panier_article':0,
'produit_physique': True,
}
nombre_article = commande['get_panier_article']
try:
panier = json.loads(request.COOKIES.get('panier'))
for obj in panier:
nombre_article += panier[obj]['qte']
produit = Produit.objects.get(id=obj)
total = produit.price * panier[obj]['qte']
commande['get_panier_article'] += panier[obj]['qte']
commande['get_panier_total'] += total
article = {
'produit': {
'pk': produit.id,
'name': produit.name,
'price': produit.price,
'nombre': produit.nombre
},
'quantite': panier[obj]['qte'],
'get_total': total
}
articles.append(article)
if produit.digital == False:
commande['produit_physique'] = True
except:
pass
context = {
'articles' : articles,
'commande': commande,
'nombre_article': nombre_article
}
return context
# Path: StockManagment/App/utils.py
def data_cookie(request):
if request.user.is_authenticated:
client = request.user.client
commande, created = Commande.objects.get_or_create(client=client, complete=False)
articles = commande.commandearticle_set.all()
nombre_article = commande.get_panier_article
else:
cookie_panier = panier_cookie(request)
articles = cookie_panier['articles']
commande = cookie_panier['commande']
nombre_article = cookie_panier['nombre_article']
context = {
'articles' : articles,
'commande': commande,
'nombre_article': nombre_article
}
return context
# Path: StockManagment/App/utils.py
def getDataFromApi(request):
try:
url = "http://localhost:8000/api/prescriptions/"
response = requests.get(url)
dataToSave = response.json()
for elt in dataToSave:
if not User.objects.filter(username=elt['email']).exists():
user = User.objects.create_user(username=elt['email'], email=elt['email'], password=elt['Token'])
user.save()
if Prescription.objects.filter(email=elt['email']).exists():
pass
else:
tmp = Prescription(nom=elt['nom'], prenom=elt['prenom'], age=elt['age'], sexe=elt['sexe'], email=elt['email'],
antecedent=elt['antecedent'], prescription1=elt['prescription1'], prescription2=elt['prescription2'],
prescription3=elt['prescription3'])
tmp.save()
try:
user = User.objects.get(username=elt['email'])
client = Client.objects.create(user=user, name=elt["nom"], email=elt['email'])
print("valid")
except:
print('invalid')
return "SUCCESS"
except:
return "FAILED"
# Path: StockManagment/App/forms.py
class LoginForm(forms.Form):
username = forms.CharField(label='Nom d\'utilisateur', widget=forms.TextInput(attrs={'class': 'form-control'}))
password = forms.CharField(label='Mot de passe', widget=PasswordInputWithClass())
# Path: StockManagment/App/views.py
from django.shortcuts import render, redirect
from django.http import JsonResponse, HttpResponse
from .models import *
from django.contrib.auth.decorators import login_required
from datetime import datetime
from .utils import panier_cookie, data_cookie, getDataFromApi
from .forms import LoginForm
from django.contrib.auth import authenticate, login, logout
import json, requests
@login_required(login_url='/login')
def shop(request, *args, **kwargs):
"""Vue des produits"""
produits = Produit.objects.all()
data = data_cookie(request)
articles = data['articles']
commande = data['commande']
nombre_article = data['nombre_article']
context = {
'produits': produits,
'nombre_article': nombre_article
}
return render(request, 'shop/index.html', context)
@login_required(login_url='/login')
def panier(request, *args, **kwargs):
data = data_cookie(request)
articles = data['articles']
commande = data['commande']
nombre_article = data['nombre_article']
context = {
'articles' : articles,
'commande': commande,
'nombre_article': nombre_article
}
return render(request, 'shop/panier.html', context)
@login_required(login_url='/login')
def commande(request, *args, **kwargs):
data = data_cookie(request)
articles = data['articles']
commande = data['commande']
nombre_article = data['nombre_article']
context = {
'articles' : articles,
'commande': commande,
'nombre_article': nombre_article
}
return render(request, 'shop/commande.html', context)
@login_required(login_url='/login')
def update_article(request, *args, **kwargs):
data = json.loads(request.body)
produit_id = data['produit_id']
action = data['action']
produit = Produit.objects.get(id=produit_id)
client = request.user.client
commande, created = Commande.objects.get_or_create(client=client, complete=False)
commande_article, created = CommandeArticle.objects.get_or_create(commande=commande, produit=produit)
if action == "add":
commande_article.quantite += 1
if action == "remove":
commande_article.quantite -=1
commande_article.save()
if commande_article.quantite <= 0:
commande_article.delete()
return JsonResponse("panier modifié", safe=False)
@login_required(login_url='/login')
def commandeAnonyme(request, data):
name = data['form']['name']
username = data['form']['username']
email = data['form']['email']
phone = data['form']['phone']
| cookie_panier = panier_cookie(request) |
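panier_cookie expects the "panier" cookie to hold a JSON object keyed by product id, each entry carrying a "qte" count; an illustrative cookie value (ids made up):
import json
panier = json.dumps({"3": {"qte": 2}, "7": {"qte": 1}})
# request.COOKIES.get('panier') would return this string, and the loop
# above would count 3 articles across the two products.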
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: kokiez/raydium-convert-SOLorTokens
# Path: pools.py
def fetch_pool_keys(mint: str):
amm_info = {}
all_pools = {}
try:
# Using this so it will be faster else no option, we go the slower way.
with open('all_pools.json', 'r') as file:
all_pools = json.load(file)
amm_info = extract_pool_info(all_pools, mint)
except:
resp = requests.get('https://api.raydium.io/v2/sdk/liquidity/mainnet.json', stream=True)
pools = resp.json()
official = pools['official']
unofficial = pools['unOfficial']
all_pools = official + unofficial
# Store all_pools in a JSON file
with open('all_pools.json', 'w') as file:
json.dump(all_pools, file, default=lambda x: x.__dict__)
amm_info = extract_pool_info(all_pools, mint)
return {
'amm_id': Pubkey.from_string(amm_info['id']),
'authority': Pubkey.from_string(amm_info['authority']),
'base_mint': Pubkey.from_string(amm_info['baseMint']),
'base_decimals': amm_info['baseDecimals'],
'quote_mint': Pubkey.from_string(amm_info['quoteMint']),
'quote_decimals': amm_info['quoteDecimals'],
'lp_mint': Pubkey.from_string(amm_info['lpMint']),
'open_orders': Pubkey.from_string(amm_info['openOrders']),
'target_orders': Pubkey.from_string(amm_info['targetOrders']),
'base_vault': Pubkey.from_string(amm_info['baseVault']),
'quote_vault': Pubkey.from_string(amm_info['quoteVault']),
'market_id': Pubkey.from_string(amm_info['marketId']),
'market_base_vault': Pubkey.from_string(amm_info['marketBaseVault']),
'market_quote_vault': Pubkey.from_string(amm_info['marketQuoteVault']),
'market_authority': Pubkey.from_string(amm_info['marketAuthority']),
'bids': Pubkey.from_string(amm_info['marketBids']),
'asks': Pubkey.from_string(amm_info['marketAsks']),
'event_queue': Pubkey.from_string(amm_info['marketEventQueue'])
}
# Path: pools.py
def make_simulate_pool_info_instruction(accounts):
keys = [
AccountMeta(pubkey=accounts["amm_id"], is_signer=False, is_writable=False),
AccountMeta(pubkey=accounts["authority"], is_signer=False, is_writable=False),
AccountMeta(pubkey=accounts["open_orders"], is_signer=False, is_writable=False),
AccountMeta(pubkey=accounts["base_vault"], is_signer=False, is_writable=False),
AccountMeta(pubkey=accounts["quote_vault"], is_signer=False, is_writable=False),
AccountMeta(pubkey=accounts["lp_mint"], is_signer=False, is_writable=False),
AccountMeta(pubkey=accounts["market_id"], is_signer=False, is_writable=False),
AccountMeta(pubkey=accounts['event_queue'], is_signer=False, is_writable=False),
]
data = POOL_INFO_LAYOUT.build(
dict(
instruction=12,
simulate_type=0
)
)
return Instruction(AMM_PROGRAM_ID, data, keys)
# Path: main.py
from solana.rpc.commitment import Commitment
from solana.rpc.api import Client
from solana.transaction import Transaction
from solders.keypair import Keypair
from pools import fetch_pool_keys, make_simulate_pool_info_instruction
from ast import literal_eval
import re
LIQUIDITY_FEES_NUMERATOR = 25
LIQUIDITY_FEES_DENOMINATOR = 10000
"""
Required Variables
"""
endpoint = "your_rpc_url"
payer = Keypair.from_base58_string("your_private_key")
token = "ca of your mint/mint address"
solana_client = Client(endpoint, commitment=Commitment("confirmed"), blockhash_cache=True)
def calculateAmountOut(amount, pool_info):
status = pool_info['status']
SWAP_decimals = pool_info['coin_decimals'] #swap coin
SOL_decimals = pool_info['pc_decimals'] #SOL
COIN_lp_decimals = pool_info['lp_decimals'] #swap coin
pool_SOL_amount = pool_info['pool_pc_amount'] #sol
pool_SWAP_amount = pool_info['pool_coin_amount'] #coin
Coin_pool_lp_supply = pool_info['pool_lp_supply'] #coin
reserve_in = pool_SOL_amount
reserve_out = pool_SWAP_amount
current_price = reserve_out / reserve_in
# print(f"Current Price in SOL: {current_price:.12f}")
amount_in = amount * 10 ** SOL_decimals
Fees = (amount_in * LIQUIDITY_FEES_NUMERATOR)/LIQUIDITY_FEES_DENOMINATOR
amount_in_with_fee = amount_in - Fees
amountOutRaw = (reserve_out * amount_in_with_fee) / (reserve_in + amount_in_with_fee)
# Slippage = 1 + slippage
# minimumAmountOut = amountOutRaw / slippage
return amountOutRaw / 10 ** SWAP_decimals
def calculateAmountIn(amount, pool_info):
SWAP_decimals = pool_info['coin_decimals'] #swap coin
SOL_decimals = pool_info['pc_decimals'] #SOL
COIN_lp_decimals = pool_info['lp_decimals'] #swap coin
pool_SOL_amount = pool_info['pool_pc_amount'] #sol
pool_SWAP_amount = pool_info['pool_coin_amount'] #coin
Coin_pool_lp_supply = pool_info['pool_lp_supply'] #coin
reserve_in = pool_SWAP_amount
reserve_out = pool_SOL_amount
current_price = reserve_out / reserve_in
# print(f"Current Price in SOL: {current_price:.12f}")
amount_in = amount * 10 ** SWAP_decimals
Fees = (amount_in * LIQUIDITY_FEES_NUMERATOR)/LIQUIDITY_FEES_DENOMINATOR
amount_in_with_fee = amount_in - Fees
amountOutRaw = (reserve_out * amount_in_with_fee) / (reserve_in + amount_in_with_fee)
# Slippage = 1 + slippage
# minimumAmountOut = amountOutRaw / slippage
return amountOutRaw / 10 ** SOL_decimals
def PoolInfo(mint):
while True:
quote = ""
| pool_keys = fetch_pool_keys(mint) |
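calculateAmountOut is the standard constant-product swap quote with a 0.25% fee (25/10000); a worked example with hypothetical reserves:
reserve_in, reserve_out = 1_000.0, 50_000.0  # illustrative pool balances
amount_in = 10.0
fee = amount_in * 25 / 10_000                # 0.025
amount_in_with_fee = amount_in - fee         # 9.975
amount_out = (reserve_out * amount_in_with_fee) / (reserve_in + amount_in_with_fee)
# ~493.8 out, slightly below the no-fee quote of 50_000 * 10 / 1_010 ~ 495.0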
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: proger/nanokitchen
# Path: structured_linear.py
class StructuredLinear(nn.Module):
def __init__(self, in_features, out_features, bias=True, device=None, dtype=None):
"""Subclasses should call reset_parameters
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
# Subclasses may override {in,out}_features_extended
if not hasattr(self, 'in_features_extended'):
self.in_features_extended = in_features
if not hasattr(self, 'out_features_extended'):
self.out_features_extended = out_features
if bias:
self.bias = nn.Parameter(torch.zeros(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
def reset_parameters(self) -> None:
self.set_weights_from_dense_init(dense_init_fn_=partial(init.kaiming_uniform_, a=math.sqrt(5)))
self.reset_parameters_bias()
def set_weights_from_dense_init(self, dense_init_fn_):
raise NotImplementedError
def reset_parameters_bias(self):
if self.bias is not None:
fan_in = self.bias.shape[-1]
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
@property
def saving(self):
raise NotImplementedError
def convert_to_dense_weight(self):
factory_kwargs = {'device': self.weight.device, 'dtype': self.weight.dtype}
dense_weight = self.forward_matmul(torch.eye(self.in_features, **factory_kwargs)).T
return dense_weight
def preprocess(self, x):
in_features = x.shape[-1]
if in_features < self.in_features_extended:
x = F.pad(x, (0, self.in_features_extended - in_features))
return x
def postprocess(self, output):
out_features_extended = output.shape[-1]
if out_features_extended > self.out_features:
output = output[..., :self.out_features]
return output
def forward_matmul(self, x):
raise NotImplementedError
def forward(self, x):
output = self.forward_matmul(x)
# Convert bias to output.dtype in case of AMP, otherwise bias and activation will be in FP32
return (output + self.bias.to(dtype=output.dtype)) if self.bias is not None else output
# Path: blockdiag_multiply.py
def blockdiag_weight_to_dense_weight(weight):
def blockdiag_multiply_reference(x, weight):
def forward(ctx, x, weight):
def backward(ctx, dout):
class BlockdiagMultiply(torch.autograd.Function):
# Path: blockdiag_linear.py
import math
import torch
import torch.nn as nn
from einops import rearrange
from structured_linear import StructuredLinear
from blockdiag_multiply import blockdiag_multiply
# Adapted from https://github.com/HazyResearch/fly/tree/master/src/models/layers
class BlockdiagLinear(StructuredLinear):
def __init__(self, *args, nblocks=4, shuffle=False, **kwargs):
"""shuffle: apply channel_shuffle operation before the matmul as in ShuffleNet
"""
super().__init__(*args, **kwargs)
in_blksz = int(math.ceil(self.in_features / nblocks))
out_blksz = int(math.ceil(self.out_features / nblocks))
self.in_features_extended = in_blksz * nblocks
self.out_features_extended = out_blksz * nblocks
self.shuffle = shuffle
self.weight = nn.Parameter(torch.empty(nblocks, out_blksz, in_blksz))
self.reset_parameters()
def set_weights_from_dense_init(self, dense_init_fn_):
dense_weight = torch.empty(self.out_features_extended, self.in_features_extended,
device=self.weight.device, dtype=self.weight.dtype)
dense_init_fn_(dense_weight)
# Scale by sqrt because the weight is sparse
scaling = math.sqrt(dense_weight.numel() / self.weight.numel())
dense_weight *= scaling
with torch.no_grad():
nblocks = self.weight.shape[0]
self.weight.copy_(rearrange(dense_weight, '(b o) (b1 i) -> b b1 o i',
b=nblocks, b1=nblocks)[0])
@property
def saving(self):
return self.weight.numel() / (self.in_features * self.out_features)
def forward_matmul(self, x):
x = self.preprocess(x)
if self.shuffle:
x = rearrange(x, '... (group c_per_group) -> ... (c_per_group group)',
group=self.weight.shape[0]) # group=nblocks
| output = blockdiag_multiply(x, self.weight) |
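For square layers the saving property works out to 1/nblocks; a quick standalone parameter count (shapes illustrative):
import torch
nblocks, in_f, out_f = 4, 1024, 1024
weight = torch.empty(nblocks, out_f // nblocks, in_f // nblocks)
assert weight.numel() / (in_f * out_f) == 0.25  # a quarter of the dense parameters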
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: karloskar/homeassistant-goecontroller-mqtt
# Path: custom_components/goecontroller_mqtt/definitions/switch.py
SWITCHES: tuple[GoEControllerSwitchEntityDescription, ...] = (
GoEControllerSwitchEntityDescription(
key="tse",
name="Time server enabled",
entity_category=EntityCategory.CONFIG,
device_class=None,
entity_registry_enabled_default=False,
disabled=True,
disabled_reason="Not exposed via MQTT in firmware 053.1",
),
GoEControllerSwitchEntityDescription(
key="hsa",
name="HTTP STA authentication",
entity_category=EntityCategory.CONFIG,
device_class=None,
entity_registry_enabled_default=False,
disabled=True,
disabled_reason="Not exposed via MQTT in firmware 053.1",
),
GoEControllerSwitchEntityDescription(
key="cwe",
name="Cloud websocket enabled",
entity_category=EntityCategory.CONFIG,
device_class=None,
entity_registry_enabled_default=False,
disabled=True,
disabled_reason="Not exposed via MQTT in firmware 053.1",
),
)
# Path: custom_components/goecontroller_mqtt/definitions/switch.py
class GoEControllerSwitchEntityDescription(GoEControllerEntityDescription, SwitchEntityDescription):
"""Switch entity description for go-eController."""
domain: str = "switch"
payload_on: str = "true"
payload_off: str = "false"
optimistic: bool = False
# Path: custom_components/goecontroller_mqtt/entity.py
class GoEControllerEntity(Entity):
"""Common go-eController entity."""
def __init__(
self,
config_entry: config_entries.ConfigEntry,
description: GoEControllerEntityDescription,
) -> None:
"""Initialize the sensor."""
topic_prefix = config_entry.data[CONF_TOPIC_PREFIX]
serial_number = config_entry.data[CONF_SERIAL_NUMBER]
self._topic = f"{topic_prefix}/{serial_number}/{description.key}"
slug = slugify(self._topic.replace("/", "_"))
self.entity_id = f"{description.domain}.{slug}"
parsed_attribute = description.attribute
if isinstance(description.attribute, tuple):
parsed_attribute = "-".join(description.attribute)
self._attr_unique_id = "-".join(
[serial_number, description.domain, description.key, parsed_attribute]
)
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, serial_number)},
name=config_entry.title,
manufacturer=DEVICE_INFO_MANUFACTURER,
model=DEVICE_INFO_MODEL,
)
# Path: custom_components/goecontroller_mqtt/switch.py
import logging
from homeassistant import config_entries, core
from homeassistant.components import mqtt
from homeassistant.components.switch import SwitchEntity
from homeassistant.core import callback
from .definitions.switch import SWITCHES, GoEControllerSwitchEntityDescription
from .entity import GoEControllerEntity
"""The go-eController (MQTT) switch."""
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: core.HomeAssistant,
config_entry: config_entries.ConfigEntry,
async_add_entities,
):
"""Config entry setup."""
async_add_entities(
GoEControllerSwitch(config_entry, description)
for description in SWITCHES
if not description.disabled
)
class GoEControllerSwitch(GoEControllerEntity, SwitchEntity):
"""Representation of a go-eController switch that is updated via MQTT."""
| entity_description: GoEControllerSwitchEntityDescription |
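GoEControllerEntity derives the MQTT topic and entity id from the config entry; with a hypothetical prefix and serial the result is roughly:
from homeassistant.util import slugify  # same helper the entity uses
topic = "go-eController/123456/cwe"  # f"{topic_prefix}/{serial_number}/{key}"
entity_id = "switch." + slugify(topic.replace("/", "_"))
# -> something like "switch.go_econtroller_123456_cwe" (slugify lowercases)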
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: T0kyoB0y/PotatoWidgets
# Path: PotatoWidgets/Variable/_Listener.py
class Listener(Variable):
def __init__(self, callback, initial_value=None):
super().__init__(initial_value)
self._callback = callback
self._thread = None
self._stop_thread = threading.Event()
self.start_listening()
def stop_listening(self):
if self._thread and self._thread.is_alive():
self._stop_thread.set()
self._thread.join()
def start_listening(self):
if self._thread and self._thread.is_alive():
print(f"{self} is already listening")
return
self._stop_thread.clear()
self._thread = threading.Thread(target=lambda: self._callback(self))
self._thread.start()
def get_value(self):
return self._value
def set_value(self, new_value):
self._value = new_value
self.emit("valuechanged")
def __str__(self):
return str(self._value)
# Path: PotatoWidgets/Variable/_Poll.py
class Poll(Variable):
def __init__(self, interval, callback, initial_value=None):
super().__init__(initial_value or callback())
self._interval = self._parse_interval(interval)
self._callback = callback
self._timeout_id = None
self.start_poll()
def _parse_interval(self, interval):
try:
if isinstance(interval, str):
unit = interval[-1].lower()
value = int(interval[:-1])
if unit == "s":
return value * 1000
elif unit == "m":
return value * 60 * 1000
elif unit == "h":
return value * 60 * 60 * 1000
elif isinstance(interval, int):
return interval
except (ValueError, IndexError):
return int(interval)
def is_polling(self):
return bool(self._timeout_id)
def stop_poll(self):
if self._timeout_id:
GLib.source_remove(self._timeout_id)
self._timeout_id = None
else:
print(f"{self} has no poll running")
def start_poll(self):
if self.is_polling():
print(f"{self} is already polling")
return
self._timeout_id = GLib.timeout_add(
priority=GLib.PRIORITY_DEFAULT_IDLE,
interval=self._interval,
function=self._poll_callback,
)
def _poll_callback(self):
self.set_value(self._callback())
return GLib.SOURCE_CONTINUE
def get_value(self):
return self._value
def set_value(self, new_value):
self._value = new_value
self.emit("valuechanged")
def __str__(self):
return str(self._value)
# Path: PotatoWidgets/Variable/_Variable.py
class Variable(GObject.Object):
valuechanged = GObject.Signal()
def __init__(self, initial_value):
super().__init__()
self._value = initial_value
def get_value(self):
return self._value
def set_value(self, new_value):
self._value = new_value
self.emit("valuechanged")
def initial_value(self, value):
self._value = value
def __str__(self):
return str(self._value)
# Path: PotatoWidgets/Widget/_Common/_BasicProps.py
from ...__Import import *
from ...Variable import Listener, Poll, Variable
class BasicProps(Gtk.Widget):
def __init__(
self,
halign,
valign,
hexpand,
vexpand,
active,
visible,
classname,
# tooltip,
css,
size=[10, 10],
):
Gtk.Widget.__init__(self)
self.set_hexpand(True if hexpand else False)
self.set_vexpand(True if vexpand else False)
self.set_halign(halign)
self.set_valign(valign)
self.set_visible(visible)
self.set_sensitive(active) if active is not None else None
self.set_classname(classname)
self.__clasif_size(size)
self.apply_css(css) if css else None
for key, value in locals().items():
callback = {
"halign": self.set_halign,
"valign": self.set_valign,
"hexpand": self.set_hexpand,
"vexpand": self.set_vexpand,
"active": self.set_sensitive,
"visible": self.set_visible,
"size": self.set_size,
"classname": self.set_classname,
}.get(key)
self.bind(value, callback) if callback else None
def set_size(self, size):
self.__clasif_size(size)
def set_halign(self, param):
super().set_halign(self.__clasif_align(str(param)))
def set_valign(self, param):
super().set_valign(self.__clasif_align(str(param)))
def __clasif_size(self, size):
if isinstance(size, int):
self.set_size_request(size, size)
elif isinstance(size, list):
if len(size) == 2:
self.set_size_request(size[0], size[1])
elif len(size) == 1:
self.set_size_request(size[0], size[0])
def __clasif_align(self, param):
dict = {
"fill": Gtk.Align.FILL,
"start": Gtk.Align.START,
"end": Gtk.Align.END,
"center": Gtk.Align.CENTER,
"baseline": Gtk.Align.BASELINE,
}
return dict.get(param.lower(), Gtk.Align.FILL)
def set_classname(self, param):
if isinstance(param, (str)):
context = self.get_style_context()
[context.add_class(i) for i in param.split(" ") if i != " "]
elif isinstance(param, (list)):
for i in param:
| if isinstance(i, (Listener, Variable, Poll)): |
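Poll._parse_interval normalises string intervals to the milliseconds GLib.timeout_add expects; the conversions, spelled out (values illustrative):
ms = lambda value, unit: value * {"s": 1_000, "m": 60_000, "h": 3_600_000}[unit]
assert ms(5, "s") == 5_000       # "5s"
assert ms(2, "m") == 120_000     # "2m"
assert ms(1, "h") == 3_600_000   # "1h"
# a bare int such as 250 is returned unchanged (already milliseconds)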
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Zerohertz/Streamlit-Quant
# Path: lib/layout.py
def _main():
layout = _default()
layout.height = 500 * st.session_state["scale"]
layout.width = 1000
layout.xaxis = {
"type": "category",
"gridcolor": "black",
"tickangle": -45,
"tickfont": {"color": "black"},
"showgrid": True,
"tickmode": "auto",
"nticks": 20,
"rangeslider": {"visible": False},
}
layout.yaxis = {
"gridcolor": "black",
"tickprefix": "₩",
"tickformat": ",",
"tickfont": {"color": "black"},
"showgrid": True,
"autorange": True,
}
if not st.session_state["cache"]["vis_signals"]:
return layout
layout.yaxis2 = {
"overlaying": "y",
"side": "right",
"tickfont": {"color": "white"},
"showgrid": False,
}
layout.shapes = st.session_state["cache"]["transaction_vert"]
if st.session_state["cache"]["method"] != "Quant":
layout.yaxis3 = {
"overlaying": "y",
"side": "right",
"tickfont": {"color": "white"},
"showgrid": False,
}
return layout
# Path: lib/layout.py
def _transaction():
layout = _default()
layout.height = 400 * st.session_state["scale"]
layout.width = 1000
return layout
# Path: lib/util.py
def _color(cnt, alpha=0.99, palette="husl"):
colors = []
colors_ = zz.plot.color(cnt, uint8=True, palette=palette)
if cnt == 1:
colors_ = [colors_]
for color_ in colors_:
colors.append("rgba(" + ",".join(list(map(str, color_))) + f",{alpha})")
return colors
# Path: lib/visual.py
import plotly.graph_objs as go
import streamlit as st
import zerohertzLib as zz
from plotly.subplots import make_subplots
from lib.layout import _main, _transaction
from lib.util import _color
def candle():
data, xdata = st.session_state["cache"]["data"], st.session_state["cache"]["xdata"]
st.session_state["cache"]["candle"] = go.Candlestick(
x=xdata,
open=data.Open,
high=data.High,
low=data.Low,
close=data.Close,
increasing={"line": {"color": "red"}},
decreasing={"line": {"color": "blue"}},
name=st.session_state["cache"]["name"],
)
st.session_state["logger"].info(
f"""[Plot] Candle Chart: {st.session_state["cache"]["name"]} ({st.session_state["cache"]["symbol"]})"""
)
def moving_average():
xdata = st.session_state["cache"]["xdata"]
st.session_state["cache"]["ma"] = []
| colors = _color(4, 0.5, "Set1") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: acman/py_june
# Path: posts/models.py
class Post(SlugModel):
title = models.CharField(max_length=50)
content = models.TextField(max_length=500, blank=True)
author = models.ForeignKey("users.ForumUser", on_delete=models.CASCADE)
category = models.ForeignKey("categories.Category", on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
is_active = models.BooleanField(default=True)
class Meta:
db_table = "posts"
verbose_name = "Post"
verbose_name_plural = "Posts"
ordering = ["-created_at"]
def __str__(self) -> str:
return self.title
# Path: comments/forms.py
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ["title", "content"]
def __init__(self, *args: tuple, **kwargs: dict) -> None:
super(CommentForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_method = "post"
self.helper.layout = Layout(
"title",
"content",
Submit(
"submit", "Create Comment", css_class="btn waves-effect waves-light"
),
)
self.field_order = ["title", "content"]
# Path: comments/models.py
class Comment(models.Model):
title = models.CharField(max_length=50)
content = models.TextField(max_length=500, blank=True)
author = models.ForeignKey("users.ForumUser", on_delete=models.CASCADE)
post = models.ForeignKey("posts.Post", on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
is_active = models.BooleanField(default=True)
class Meta:
db_table = "comments"
verbose_name = "Comment"
verbose_name_plural = "Comments"
ordering = ["-created_at"]
def __str__(self) -> str:
return self.title
# Path: comments/views.py
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.http import HttpRequest, HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views import View
from posts.models import Post
from .forms import CommentForm
from .models import Comment
class CreateCommentView(LoginRequiredMixin, View):
template_name = "comments/comment_form.html"
login_url = "/users/login/"
def get(self, request: HttpRequest, post_slug: str) -> HttpResponse:
post = get_object_or_404(Post, slug=post_slug)
form = CommentForm()
return render(request, self.template_name, {"form": form, "post": post})
def post(self, request: HttpRequest, post_slug: str) -> HttpResponse:
form = CommentForm(request.POST)
post = get_object_or_404(Post, slug=post_slug)
if form.is_valid():
comment = form.save(commit=False)
comment.author = self.request.user
comment.post_id = post.pk
comment.save()
return redirect("categories:detail", category_slug=post.category.slug)
return render(request, self.template_name, {"form": form, "post": post})
class UpdateCommentView(UserPassesTestMixin, View):
template_name = "comments/comment_update.html"
def test_func(self) -> bool:
comment_pk = self.kwargs.get("comment_pk")
| comment = get_object_or_404(Comment, pk=comment_pk) |
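test_func is cut off after fetching the comment; a plausible continuation (an assumption, the repository may differ) restricts editing to the comment's author:
def test_func(self) -> bool:
    comment_pk = self.kwargs.get("comment_pk")
    comment = get_object_or_404(Comment, pk=comment_pk)
    return comment.author == self.request.user  # assumed ownership check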
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: pkariz/grin-explorer
# Path: backend/api/models.py
class Block(TimeStampedModel):
blockchain = models.ForeignKey(
Blockchain, related_name='blocks', on_delete=models.CASCADE)
hash = models.CharField(
primary_key=True,
max_length=64,
validators=[MinLengthValidator(64)],
db_index=True,
)
height = models.PositiveIntegerField(db_index=True)
timestamp = models.DateTimeField(db_index=True)
header = models.ForeignKey(
'BlockHeader', related_name='block', on_delete=models.CASCADE)
prev_hash = models.CharField(
max_length=64,
null=True,
blank=True,
validators=[MinLengthValidator(64)],
)
nr_inputs = models.PositiveIntegerField(default=0)
nr_outputs = models.PositiveIntegerField(default=0)
nr_kernels = models.PositiveIntegerField(default=0)
# when reorg is set it means this block is part of a reorg and not the main
# chain
reorg = models.ForeignKey(
'Reorg', null=True, related_name='blocks', on_delete=models.CASCADE)
def __str__(self):
suffix = ''
if self.reorg:
suffix = ' Reorged: {}'.format(self.reorg.id)
return '{}: {} (prev: {})'.format(
self.height, self.hash, self.prev_hash)
def get_next_block(self):
return Block.objects.filter(prev_hash=self.hash).first()
def get_previous_block(self):
return Block.objects.filter(hash=self.prev_hash).first()
def full_print(self, prefix=''):
"""Used for developing and debugging."""
print('---------------------------------------------------------------')
print(f'{prefix}Block {self.height}: {self.hash}, reorg: {self.reorg}')
print(f'{prefix} INPUTS:')
for input in self.inputs.all():
print(f'{prefix} {input}, output: {input.output}')
print(f'{prefix} OUTPUTS:')
for output in self.outputs.all():
print(f'{prefix} {output}')
print(f'{prefix} KERNELS:')
for kernel in self.kernels.all():
print(f'{prefix} {kernel}')
print('---------------------------------------------------------------')
# Path: backend/api/models.py
class Reorg(TimeStampedModel):
id = models.BigAutoField(primary_key=True)
blockchain = models.ForeignKey(
Blockchain, related_name='reorgs', on_delete=models.CASCADE)
# start_reorg_block and end_reorg_block define starting and ending block,
# which were reorged
start_reorg_block = models.ForeignKey(
Block, related_name='start_reorgs', on_delete=models.CASCADE)
end_reorg_block = models.ForeignKey(
Block, related_name='end_reorgs', on_delete=models.CASCADE)
# start_main_block defines starting block which is the new start of the main
# chain - the block that replaced start_reorg_block. We usually don't know
# which the ending block is when we spot the reorg, so we don't store it
# (we don't even have it in DB at that time yet since we usually get them
# incrementally in the order they're accepted).
start_main_block = models.ForeignKey(
Block, related_name='start_mains', on_delete=models.CASCADE)
def __str__(self):
return '{}: start: {}, end: {}'.format(
self.blockchain.slug, self.start_reorg_block, self.end_reorg_block)
# Path: backend/api/helpers.py
def fix_outputs_and_inputs_from_reorg(reorg):
"""
Fix Output.spent and Input.output on instances that were affected by the
given reorg. Note that due to the order of block fetching (sometimes
descending by height) we might have corrupted Output/Input instances also on
the reorged block. For example if block 102.1 in a reorg creates output with
commitment 'd' and the same commitment is created in block 102 but we first
fetch block 103 which spends it, then it will update output 'd' from 102.1
because it doesn't yet know that it's a part of a reorg (due to the way we
implemented things). We also need to fix outputs which were spent in a reorg
but not in the main chain and vice-versa.
"""
# solve reorged part
reorged_blocks = get_blocks_between(
reorg.start_reorg_block, reorg.end_reorg_block)
reorg_inputs = Input.objects.filter(block__in=reorged_blocks)
reorg_outputs = Output.objects.filter(block__in=reorged_blocks)
for output in reorg_outputs:
matching_input = reorg_inputs\
.filter(commitment=output.commitment)\
.first()
output.spent = False
if matching_input:
output.spent = True
matching_input.output = output
matching_input.save()
output.save()
# NOTE: some redundancy in this loop, but reorgs are rare so it's ok
for input in reorg_inputs:
matching_output = reorg_outputs\
.filter(commitment=input.commitment)\
.first()
if not matching_output:
# part of the main chain before the reorg happened, fix it there
matching_output = Output.objects.filter(
block__reorg=None, commitment=input.commitment).first()
if matching_output:
matching_output.spent = False
matching_output.save()
input.output = matching_output
input.save()
# solve main part
main_blocks = Block.objects\
.filter(height__gte=reorg.start_main_block.height, reorg=None)\
.order_by('height')
for block in main_blocks:
for input in block.inputs.all():
matching_output = Output.objects.filter(
block__reorg=None, commitment=input.commitment).first()
if matching_output:
matching_output.spent = True
matching_output.save()
input.output = matching_output
input.save()
# Path: backend/api/signals/receivers.py
from django.db.models.signals import post_save
from django.dispatch import receiver
from backend.api.models import Block, Reorg
from backend.api.helpers import fix_outputs_and_inputs_from_reorg
import logging
logger = logging.getLogger(__name__)
@receiver(
post_save,
| sender=Block, |
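The @receiver call is truncated after sender=Block; for reference, a post_save handler completing it would have the standard Django signature (the body below is purely hypothetical, not the repository's code):
@receiver(post_save, sender=Block)
def on_block_saved(sender, instance, created, **kwargs):
    if created:
        logger.debug("stored block %s at height %s", instance.hash, instance.height)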
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: CodeWithEmad/num2fa
# Path: num2fa/constants.py
DEFAULT_SCIENTIFIC_SEPARATOR = " در ده به توان "
# Path: num2fa/constants.py
WORDS_DECIMAL_SEPARATOR = " و "
# Path: num2fa/constants.py
WORDS_FRACTION_SEPARATOR = " "
# Path: num2fa/constants.py
WORDS_NEGATIVE = "منفی "
# Path: num2fa/constants.py
ZERO = "صفر"
# Path: num2fa/utils.py
def _natural_words(str_num: str) -> str:
if str_num == "0":
return ZERO
length = len(str_num)
if length > len(CLASSES) * 3:
raise ValueError("out of range")
modulo_3 = length % 3
if modulo_3:
str_num = "0" * (3 - modulo_3) + str_num
length += 3 - modulo_3
groups = length // 3
group = groups
natural_words = ""
while group > 0:
three_digit = str_num[group * 3 - 3 : group * 3]
word3 = _three_digit_words(int(three_digit))
if word3 and group != groups:
if natural_words:
natural_words = (
word3
+ CLASSES[groups - group]
+ WORDS_DECIMAL_SEPARATOR
+ natural_words
)
else:
natural_words = word3 + CLASSES[groups - group]
else:
natural_words = word3 + natural_words
group -= 1
return natural_words
# Path: num2fa/utils.py
def _normalize_str(number: str) -> str:
"""Normalize the input number string."""
return str(number).strip().translate(NORMALIZATION_TABLE)
# Path: num2fa/utils.py
def _point_words(
number: str,
decimal_separator: str,
) -> str:
before_p, p, after_p = number.partition(".")
if after_p:
if before_p == "0":
if after_p == "0":
return ZERO
return _natural_words(after_p) + DECIMAL_PLACES[len(after_p)]
if after_p != "0":
return (
_natural_words(before_p)
+ decimal_separator
+ _natural_words(after_p)
+ DECIMAL_PLACES[len(after_p)]
)
return _natural_words(before_p)
return _natural_words(before_p)
# Path: num2fa/converters/word_converter.py
from decimal import Decimal
from fractions import Fraction
from functools import singledispatch
from typing import Union
from num2fa.constants import (
DEFAULT_SCIENTIFIC_SEPARATOR,
WORDS_DECIMAL_SEPARATOR,
WORDS_FRACTION_SEPARATOR,
WORDS_NEGATIVE,
ZERO,
)
from num2fa.utils import _natural_words, _normalize_str, _point_words
"""Provide functions to convert a number to Persian words."""
def _exp_words(
number: str,
positive: str,
negative: str,
decimal_separator: str,
scientific_separator: str,
) -> str:
# exponent
base, e, exponent = number.partition("e")
if exponent:
return (
_point_words(base, decimal_separator)
+ scientific_separator
+ words(int(exponent), positive, negative)
)
return _point_words(base, decimal_separator)
@singledispatch
def words(
number: Union[int, float, str, Decimal, Fraction],
positive: str = "",
negative: str = WORDS_NEGATIVE,
decimal_separator: str = WORDS_DECIMAL_SEPARATOR,
fraction_separator: str = WORDS_FRACTION_SEPARATOR,
ordinal_denominator: bool = True,
scientific_separator: str = DEFAULT_SCIENTIFIC_SEPARATOR,
) -> str:
"""Return the word form of number.
If input is a string it should be in the form of a valid Python
representation for one of the other accepted types. The only exception is
that digits may be Persian; for example, words('۴۲') is valid.
"""
raise TypeError("invalid input type for words function", number)
@words.register(str)
@words.register(Decimal)
def _(
number: str,
positive: str = "",
negative: str = WORDS_NEGATIVE,
decimal_separator: str = WORDS_DECIMAL_SEPARATOR,
fraction_separator: str = WORDS_FRACTION_SEPARATOR,
ordinal_denominator: bool = True,
scientific_separator: str = DEFAULT_SCIENTIFIC_SEPARATOR,
) -> str:
# Normalize the number string
number = _normalize_str(number)
# sign
c0 = number[0]
if c0 == "-":
sign = negative
number = number[1:]
elif c0 == "0":
sign = ""
else:
sign = positive
numerator, e, denominator = number.partition("/")
if denominator:
if ordinal_denominator:
return (
sign
| + _natural_words(numerator) |
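words() leans on str.partition to peel off sign, fraction, and exponent; a minimal standalone illustration of that mechanic:
numerator, _, denominator = "3/4".partition("/")
assert (numerator, denominator) == ("3", "4")
base, _, exponent = "1.5e3".partition("e")
assert (base, exponent) == ("1.5", "3")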
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: the-seeds/cardinal
# Path: src/cardinal/core/schema/extractor.py
class Extractor(ABC):
@abstractmethod
def load(self, input_files: List[Path], user_id: str, verbose: Optional[bool] = False) -> None:
r"""
Loads the files into database.
Args:
input_files: a list of paths to input files.
user_id: the user id.
verbose: whether or not to show the process bar.
"""
...
# Path: src/cardinal/core/schema/leaf.py
class Leaf(LeafIndex):
content: str
# Path: src/cardinal/core/schema/leaf.py
class LeafIndex(BaseModel):
leaf_id: str = Field(default_factory=lambda: uuid.uuid4().hex)
user_id: str
# Path: src/cardinal/core/splitter/text_splitter.py
class CJKTextSplitter(TextSplitter):
def split(self, text: str) -> List[str]:
text = re.sub(r"\n{3,}", r"\n", text)
text = re.sub(r" {3,}", r" ", text)
text = re.sub(r"([。!?;])([^’”])", r"\1\n\2", text) # split with CJK stops
text = re.sub(r"(\…{2})([^’”])", r"\1\n\2", text) # split with CJK ellipsis
text = re.sub(r"([。!?;][’”]{0,2})([^,。!?;])", r"\1\n\2", text)
text = text.rstrip()
return super().split(text)
# Path: src/cardinal/core/extractor/base_extractor.py
import os
from multiprocessing import Pool
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional
from tqdm import tqdm
from ..schema import Extractor, Leaf, LeafIndex
from ..splitter import CJKTextSplitter
from ..model import EmbedOpenAI
from ..schema import StringKeyedStorage, VectorStore
from ..model import EmbedOpenAI
from ..storage import RedisStorage
from ..vectorstore import Milvus
if TYPE_CHECKING:
class BaseExtractor(Extractor):
def __init__(
self, vectorizer: "EmbedOpenAI", storage: "StringKeyedStorage[Leaf]", vectorstore: "VectorStore[LeafIndex]"
) -> None:
self._vectorizer = vectorizer
self._storage = storage
self._vectorstore = vectorstore
self._splitter = CJKTextSplitter()
def load(self, input_files: List[Path], user_id: str, verbose: Optional[bool] = False) -> None:
file_contents: List[str] = []
for file_path in tqdm(input_files, desc="Extract content", disable=(not verbose)):
if file_path.suffix == ".txt":
with open(file_path, "r", encoding="utf-8") as f:
file_contents.append(f.read())
else:
raise NotImplementedError
text_chunks = []
with Pool(processes=int(os.environ.get("NUM_CPU_CORE"))) as pool:
for chunks in tqdm(
pool.imap_unordered(self._splitter.split, file_contents),
total=len(file_contents),
desc="Split content",
disable=(not verbose),
):
text_chunks.extend(chunks)
leaf_indexes = []
for chunk in tqdm(text_chunks, desc="Build index", disable=(not verbose)):
| leaf_index = LeafIndex(user_id=user_id) |
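One of CJKTextSplitter's substitutions inserts a newline after CJK full stops before handing off to the parent splitter; a standalone re check (text illustrative):
import re
text = "你好。世界!"
assert re.sub(r"([。!?;])([^’”])", r"\1\n\2", text) == "你好。\n世界!"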
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: datrocity/pond
# Path: pond/conventions.py
METADATA_DIRNAME = '_pond'
# Path: pond/conventions.py
MANIFEST_FILENAME = 'manifest.yml'
# Path: pond/conventions.py
def version_data_location(version_location: str, data_filename: str) -> str:
return urijoinpath(version_location, data_filename)
# Path: pond/conventions.py
def version_manifest_location(version_location: str) -> str:
""" Manifest location with respect to a version root. """
return urijoinpath(version_location, METADATA_DIRNAME, MANIFEST_FILENAME)
# Path: pond/conventions.py
def version_uri(datastore_id: str, location: str, artifact_name: str, version_name: VersionName):
uri = f'pond://{datastore_id}/{location}/{artifact_name}/{str(version_name)}'
return uri
# Path: pond/conventions.py
def urijoinpath(*parts: str) -> str:
"""Joins two uri path components, also ensure the right part does not end with a slash"""
# TODO: use os.path.join
return '/'.join([part.rstrip('/') for part in parts])
# Path: pond/version_name.py
class SimpleVersionName(VersionName):
"""Simple version name are just an integer number (greater than 0) prefixed with "v" when
rendered as string."""
_FORMAT = re.compile('^v?([1-9][0-9]*)$')
# --- VersionName class interface
@classmethod
def from_string(cls, version_name: str) -> 'SimpleVersionName':
match = SimpleVersionName._FORMAT.match(version_name)
if not match:
raise InvalidVersionName(version_name)
return cls(int(match[1]))
@classmethod
def next(cls, prev: Optional['VersionName'] = None) -> VersionName:
if prev is None:
next_ = SimpleVersionName(1)
elif not isinstance(prev, SimpleVersionName):
raise IncompatibleVersionName(prev, SimpleVersionName)
else:
next_ = SimpleVersionName(prev.version_number + 1)
return next_
def __init__(self, version_number: int):
self.version_number = version_number
# -- VersionName protected interface
def _partial_compare(self, other: VersionName) -> Optional[int]:
if isinstance(other, SimpleVersionName):
return 0 if self.version_number == other.version_number else (
-1 if self.version_number < other.version_number else 1)
return None
# -- Magic methods
def __hash__(self) -> int:
return hash(self.version_number)
def __str__(self) -> str:
return f'v{self.version_number}'
# Path: tests/test_conventions.py
from pond.conventions import (
METADATA_DIRNAME,
MANIFEST_FILENAME,
version_data_location,
version_manifest_location,
version_uri,
urijoinpath,
)
from pond.version_name import SimpleVersionName
def test_urijoinpath():
joined = urijoinpath('a', 'b/', 'c/')
expected = 'a/b/c'
assert joined == expected
def test_data_location():
| location = version_data_location('abc/', 'blah.bin') |
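Given urijoinpath, the expected values for these tests follow directly from the conventions module:
from pond.conventions import version_data_location, version_manifest_location
assert version_data_location('abc/', 'blah.bin') == 'abc/blah.bin'
assert version_manifest_location('abc') == 'abc/_pond/manifest.yml'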
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Zitronenjoghurt/Colonaut
# Path: src/utils/file_operations.py
def construct_path(relative_path: str) -> str:
path_parts = relative_path.split("/")
absolute_path = os.path.join(ROOT_DIR, *path_parts)
return absolute_path
# Path: src/utils/file_operations.py
def files_in_directory(path: str, suffix: Optional[str] = None) -> list[str]:
if not os.path.exists(path):
raise ValueError(f"Directory {path} does not exist.")
files = []
for file in os.listdir(path):
if suffix is not None:
if suffix in file:
files.append(file)
else:
files.append(file)
return files
# Path: src/utils/file_operations.py
def file_to_dict(file_path: str) -> dict:
with open(file_path, 'r', encoding='utf-8') as f:
data = json.load(f)
if not isinstance(data, dict):
raise RuntimeError("Deserialized data is not a dictionary.")
return data
# Path: src/utils/file_operations.py
def str_to_file(file_path: str, string: str):
with open(file_path, 'w', encoding='utf-8') as f:
f.write(string)
# Path: src/constants/locales.py
class Locales:
# Common
FAILURE = "failure"
SUCCESS = "success"
# Descriptions
ACCELEROMETER_DESCRIPTION = "accelerometer_description"
BATTERY_DESCRIPTION = "battery_description"
HULL_DESCRIPTION = "hull_description"
INFRARED_SPECTROMETER_DESCRIPTION = "infrared_spectrometer_description"
LASER_ALTIMETER_DESCRIPTION = "laser_altimeter_description"
NEUTRON_DENSITOMETER_DESCRIPTION = "neutron_densitometer_description"
RADIO_TELEMETRY_DESCRIPTION = "radio_telemetry_description"
SOLAR_PANEL_DESCRIPTION = "solar_panel_description"
# Messages
BATTERY_CHARGED_BY = "battery_charged_by"
BATTERY_DISTRIBUTED_ENERGY = "battery_distributed_energy"
BATTERY_FULLY_CHARGED = "battery_fully_charged"
BATTERY_WARNING_NET_NEGATIVE_ENERGY = "battery_warning_net_negative_energy"
SOLAR_PANEL_COLLECTED_ENERGY = "solar_panel_collected_energy"
SOLAR_PANEL_NO_BATTERY = "solar_panel_no_battery"
# Names
ACCELEROMETER = "accelerometer"
BATTERY = "battery"
HULL = "hull"
INFRARED_SPECTROMETER = "infrared_spectrometer"
LASER_ALTIMETER = "laser_altimeter"
NEUTRON_DENSITOMETER = "neutron_densitometer"
RADIO_TELEMETRY = "radio_telemetry"
SOLAR_PANEL = "solar_panel"
# Science
DENSITY = "density"
MASS = "mass"
ORB_PERIOD = "orb_period"
RADIUS = "radius"
ROT_PERIOD = "rot_period"
TEMPERATURE = "temperature"
VOLUME = "volume"
# Stats
CAPACITY = "capacity"
CHARGE_CAPACITY = "charge_capacity"
HEALTH = "health"
MAX_CAPACITY = "max_capacity"
MAX_HP = "max_hp"
POWER = "power"
POWER_USAGE = "power_usage"
REVEAL_CHANCE = "reveal_chance"
SUCCESS_RATE = "success_rate"
# UI
ADDITIONAL_INFORMATION = "additional_information"
INSPIRED_BY_SEEDSHIP = "inspired_by_seedship"
OPTIONS = "options"
QUIT = "quit"
START_GAME = "start_game"
STATS = "stats"
@classmethod
def get_existing_keys(cls) -> list[str]:
return [getattr(cls, attr) for attr in dir(cls) if not callable(getattr(cls, attr)) and not attr.startswith("__")]
# Path: src/constants/locale_translator.py
from src.utils.file_operations import construct_path, files_in_directory, file_to_dict, str_to_file
from .locales import Locales
LOCALES_FILE_PATH = construct_path("src/data/locale/{language}/")
OUTPUT_TXT_FILE_PATH = construct_path("locale_{language}.txt")
LANGUAGES = ["en"]
class LocaleTranslator():
_instance = None
| KEYS = Locales |
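For context, _instance = None is the usual hook for a singleton. A minimal sketch of how LocaleTranslator might enforce it — an assumption, since the real constructor is not shown — looks like:

def __new__(cls):
    # create the instance once, then reuse the cached one
    if cls._instance is None:
        cls._instance = super().__new__(cls)
    return cls._instance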
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: daojiAnime/aio_retrying
# Path: aio_retrying.py
class ConditionError(Exception):
pass
# Path: aio_retrying.py
def retry(
fn: Callable = None,
*,
attempts: int = 0,
callback: Optional[Callable] = None,
fallback: Union[Callable, Type[BaseException], Any] = None,
timeout: Union[int, float] = None,
delay: int = 0,
retry_exceptions: Tuple[Type[BaseException]] = (Exception,),
fatal_exceptions: Tuple[Type[BaseException]] = (asyncio.CancelledError,),
):
if fn is None:
return partial(
retry,
attempts=attempts,
callback=callback,
fallback=fallback,
timeout=timeout,
delay=delay,
retry_exceptions=retry_exceptions,
fatal_exceptions=fatal_exceptions,
)
@wraps(fn)
def wrapper(*args, **kwargs) -> Coroutine:
async def wrapped(attempt: int = 0) -> Any:
if not asyncio.iscoroutinefunction(fn):
raise ConditionError(
"Only support coroutine function",
)
if timeout is not None and asyncio.TimeoutError not in retry_exceptions:
_retry_exceptions = (asyncio.TimeoutError,) + retry_exceptions
else:
_retry_exceptions = retry_exceptions
try:
if timeout is None:
ret = await fn(*args, **kwargs)
else:
with async_timeout.timeout(timeout):
ret = await fn(*args, **kwargs)
return ret
except ConditionError:
raise
except fatal_exceptions:
raise
except _retry_exceptions as exc:
_attempts = "infinity" if attempts is forever else attempts
logger.debug(
exc.__class__.__name__ + f" -> Tried attempt {attempt} from total {attempts} for {fn}",
exc_info=exc,
)
if attempts is forever or attempt < attempts:
await asyncio.sleep(delay)
return await wrapped(attempt=attempt + 1)
ret = None
if fallback is not None:
if fallback is propagate:
raise exc
if is_exception(fallback):
raise fallback from exc
if callable(fallback):
if asyncio.iscoroutinefunction(fallback): # noqa
ret = await fallback(*args, **kwargs)
else:
ret = fallback(*args, **kwargs)
else:
ret = fallback
if callback is not None:
if not callable(callback):
raise ConditionError(
"Callback must be callable",
)
if asyncio.iscoroutinefunction(callback):
await callback(attempt, exc, args, kwargs)
else:
callback(attempt, exc, args, kwargs)
return ret
return wrapped()
return wrapper
# Path: tests/test_condition_error.py
import asyncio
import pytest
from aio_retrying import ConditionError, retry
async def test_timeout_is_not_none_and_not_async():
@retry(timeout=0.5)
def not_coro():
pass
| with pytest.raises(ConditionError): |
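Since the retry wrapper returns a coroutine and the iscoroutinefunction check only fires once it runs, the body of the raises block plausibly awaits the call:

await not_coro()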
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: xIMRANx/secret_postcard
# Path: app/db/functions.py
class User(models.User):
@classmethod
async def is_registered(cls, telegram_id: int) -> Union[models.User, bool]:
try:
return await cls.get(telegram_id=telegram_id)
except DoesNotExist:
return False
@classmethod
async def is_admin(cls, telegram_id: int) -> bool:
user = await cls.is_registered(telegram_id)
if not user:
return False
if user.role == "admin":
return True
else:
return False
@classmethod
async def register(cls, telegram_id: int, name: str = None) -> None:
await User(
telegram_id=telegram_id, name=name, create_date=datetime.now()
).save()
@classmethod
async def get_count(cls) -> int:
return await cls.all().count()
@classmethod
async def edit_anonymous(cls, user_id: int, anonymous: bool) -> None:
await cls.filter(telegram_id=user_id).update(anonymous=anonymous)
@classmethod
async def get_all_users(cls) -> list[models.User]:
return await cls.all()
# Path: app/db/functions.py
class Card(models.Card):
@classmethod
async def get_all_card_owners(cls) -> list[models.Card]:
return await cls.filter(approved=True).values_list("owner_id", flat=True)
@classmethod
async def get_count(cls) -> int:
return await cls.all().count()
@classmethod
async def create_card(
cls, file_id: str, description: str, owner_id: int, file_type: str = "photo"
) -> None:
await Card(
file_id=file_id,
description=description,
owner_id=owner_id,
file_type=file_type,
create_date=datetime.now(),
).save()
@classmethod
async def check_exists(cls, user_id: int) -> bool:
return await cls.filter(owner_id=user_id).exists()
@classmethod
async def approve(cls, user_id: int) -> None:
await cls.filter(owner_id=user_id).update(approved=True)
@classmethod
async def get_card(cls, user_id: int) -> Union[models.Card, bool]:
try:
return await cls.get(owner_id=user_id, approved=False)
except DoesNotExist:
return False
@classmethod
async def delete_card(cls, user_id: int) -> None:
await cls.filter(owner_id=user_id).delete()
@classmethod
async def get_all_cards(cls) -> list[models.Card]:
return await cls.filter(approved=True).all()
# Path: app/keyboards/inline.py
def get_approve_keyboard(user_id):
buttons = [
[InlineKeyboardButton(text="✅", callback_data=f"approve:{user_id}")],
[InlineKeyboardButton(text="❌", callback_data=f"decline:{user_id}")],
]
keyboard = InlineKeyboardBuilder(markup=buttons)
return keyboard.as_markup()
# Path: app/config.py
class Config:
bot: ConfigBot
database: ConfigDatabase
settings: ConfigSettings
api: ConfigApi
@classmethod
def parse(cls, data: dict) -> "Config":
sections = {}
for section in fields(cls):
pre = {}
current = data[section.name]
for field in fields(section.type):
if field.name in current:
pre[field.name] = current[field.name]
elif field.default is not MISSING:
pre[field.name] = field.default
else:
raise ValueError(
f"Missing field {field.name} in section {section.name}"
)
sections[section.name] = section.type(**pre)
return cls(**sections)
# Path: app/handlers/user/file.py
from aiogram import Router, Bot, F
from aiogram.types import Message
from app.db.functions import User
from app.db.functions import Card
from app.keyboards.inline import get_approve_keyboard
from app.config import Config
router = Router()
@router.message(F.content_type.in_({"photo", "video", "animation"}))
async def get_postcard(message: Message, bot: Bot, config: Config):
if await Card.check_exists(message.from_user.id):
await message.answer("Вы уже отправили свою открытку!")
return
postcard_type = message.content_type
if message.photo is not None:
file_id = message.photo[-1].file_id
elif message.video is not None:
file_id = message.video.file_id
elif message.animation is not None:
file_id = message.animation.file_id
else:
file_id = None
user_id = message.from_user.id
chat_id = config.settings.chat_id
| if not await User.is_registered(user_id): |
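A plausible indented body for this check — an assumption, since the rest of the handler is not shown — registers first-time senders via the User helper above (full_name is a standard aiogram attribute):

await User.register(user_id, message.from_user.full_name)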
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: akkoaya/ArticleSpider
# Path: ArticleSpider/items.py
class CnblogItem(scrapy.Item):
url = scrapy.Field()
url_object_id = scrapy.Field()
title = scrapy.Field()
date = scrapy.Field()
writer_id = scrapy.Field()
views_num = scrapy.Field()
comments_num = scrapy.Field()
main_content = scrapy.Field()
def save_to_es(self):
cnblog = CnblogPost()
cnblog.url = self['url'][0]
cnblog.meta.id = self['url_object_id'][0]  # set the index id to url_object_id
cnblog.title = self['title'][0]
cnblog.date = self['date'][0]
cnblog.writer_id = self['writer_id'][0]
cnblog.views_num = self['views_num'][0]
cnblog.comments_num = self['comments_num'][0]
cnblog.main_content = remove_tags(self['main_content'][0])
cnblog.suggest = get_suggests("cnblog",((cnblog.title, 10),))  # note: a single-element tuple must include a trailing comma, otherwise the element is not counted
cnblog.save()  # save
redis_cli.incr('cnblog_nums')
return
def get_insert_sql(self):
insert_sql = """
insert into cnblog(url_object_id,url,title,date,writer_id,views_num,comments_num,main_content)
values(%s,%s,%s,%s,%s,%s,%s,%s)
"""
params = (
self["url_object_id"][0], self["url"][0], self['title'][0],
self['date'][0], self['writer_id'][0], self['views_num'][0],
self['comments_num'][0],self['main_content'][0]
)
return insert_sql, params
# Path: ArticleSpider/utils/common.py
def get_md5(url):
m = hashlib.md5()
m.update(url.encode("utf-8"))
return m.hexdigest()
# Path: scrapy_redis/spiders.py
class RedisSpider(RedisMixin, Spider):
"""Spider that reads urls from redis queue when idle.
Attributes
----------
redis_key : str (default: REDIS_START_URLS_KEY)
Redis key to fetch start URLs from.
redis_batch_size : int (default: CONCURRENT_REQUESTS)
Number of messages to fetch from redis on each attempt.
redis_encoding : str (default: REDIS_ENCODING)
Encoding to use when decoding messages from redis queue.
Settings
--------
REDIS_START_URLS_KEY : str (default: "<spider.name>:start_urls")
Default Redis key where to fetch start URLs from..
REDIS_START_URLS_BATCH_SIZE : int (deprecated by CONCURRENT_REQUESTS)
Default number of messages to fetch from redis on each attempt.
REDIS_START_URLS_AS_SET : bool (default: False)
Use SET operations to retrieve messages from the redis queue. If False,
the messages are retrieved using the LPOP command.
REDIS_ENCODING : str (default: "utf-8")
Default encoding to use when decoding messages from redis queue.
"""
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
obj = super(RedisSpider, cls).from_crawler(crawler, *args, **kwargs)
obj.setup_redis(crawler)
return obj
# Path: ArticleSpider/spiders/cnblog.py
import scrapy
import datetime
import re
from scrapy.http import Request
from urllib import parse
from ..items import CnblogItem
from ..utils.common import get_md5
from scrapy.loader import ItemLoader
from scrapy_redis.spiders import RedisSpider
class CnblogSpider(scrapy.Spider):
name = "cnblog"
allowed_domains = ["www.cnblogs.com"]
start_urls = ["https://www.cnblogs.com/sitehome/p/1"]
# redis_key = 'cnblog:start_urls'
next_url = "https://www.cnblogs.com/sitehome/p/{0}"
# headers = {
# "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
# }
def parse(self, response):
all_urls = response.css('div.post-list a::attr(href)').extract()
all_urls = [parse.urljoin(response.url, url) for url in all_urls]
for url in all_urls:
match_obj = re.match('(.*.cnblogs.com/(.*)/p/.*.html)',url)
if match_obj:
request_url = match_obj.group(1)
writer_id = match_obj.group(2)
yield Request(url=request_url,meta={'writer_id':writer_id},callback=self.parse_detail)
for x in range(2,100):
yield Request(url=self.next_url.format(x), callback=self.parse)
def parse_detail(self,response):
item_loader = ItemLoader(item=CnblogItem(), response=response)
item_loader.add_value("url", response.url)
| item_loader.add_value("url_object_id", get_md5(response.url)) |
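From here the loader would plausibly fill the remaining declared fields and yield the item; writer_id comes from the request meta set in parse, and load_item/yield is standard Scrapy (selectors for the other fields are omitted as unknowns):

item_loader.add_value("writer_id", response.meta.get("writer_id"))
yield item_loader.load_item()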
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Asa-Nisi-Masa/christmas-tree
# Path: christmas_tree/common/settings.py
PATH_SAVE = "coordinates.csv"
# Path: christmas_tree/common/settings.py
TOTAL_LEDS = 500
# Path: christmas_tree/calculations/compute_coords.py
from collections import defaultdict, namedtuple
from pathlib import Path
from typing import Dict, List, Optional
from tqdm import tqdm
from christmas_tree.common.settings import PATH_SAVE, TOTAL_LEDS
import cv2
import numpy as np
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
centers = []
for contour in contours:
M = cv2.moments(contour)
if M["m00"] != 0:
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
centers.append(Point(cX, cY))
return centers
def _compute_correct_positions(contour_centers: List[Point]) -> Optional[Point]:
if len(contour_centers) == 0:
return None
if len(contour_centers) == 1:
return contour_centers[0]
min_dist = float("inf")
for i in range(len(contour_centers)):
for j in range(i, len(contour_centers)):
if i == j:
continue
xi, yi = contour_centers[i]
xj, yj = contour_centers[j]
dist2 = (xi - xj) ** 2 + (yi - yj) ** 2
if dist2 < min_dist:
min_dist = dist2
if min_dist < MAX_DIST**2:
centers = np.array(contour_centers).mean(axis=0)
return Point(int(centers[0]), int(centers[1]))
return None
def _get_map_from_index_to_position(angle: int) -> Dict[int, Point]:
map_index_to_position = {}
total_errors = 0
for i in range(TOTAL_LEDS):
path = Path("frames") / str(angle) / f"{i}.jpg"
frame = cv2.imread(str(path))
contour_centers = _compute_naive_positions(frame)
center = _compute_correct_positions(contour_centers)
if center is None:
total_errors += 1
map_index_to_position[i] = None
else:
map_index_to_position[i] = _get_uv(center, width, height)
return map_index_to_position
def get_map_index_to_angle_position() -> Dict[int, Dict[int, Point]]:
# map_index_to_angle_position = map from LED index to a map from angle to LED position
angles_to_centers = {}
map_index_to_angle_position = defaultdict(dict)
for angle in tqdm(ANGLES):
map_index_to_position = _get_map_from_index_to_position(angle)
angles_to_centers[angle] = map_index_to_position
for i in range(TOTAL_LEDS):
map_index_to_angle_position[i][angle] = map_index_to_position[i]
return map_index_to_angle_position
def validate_led_positions(map_index_to_angle_position: Dict[int, Dict[int, Point]]) -> None:
total_no_centers = 0
for i in range(TOTAL_LEDS):
num_angles_center_is_defined = sum(el is not None for el in map_index_to_angle_position[i].values())
if num_angles_center_is_defined < 1:
print(f"No center can be found for {i} LED")
total_no_centers += 1
print("Total no LED positions found:", total_no_centers)
def get_frames_to_xyz(map_index_to_angle_position: Dict[int, Dict[int, Point]]) -> Dict[int, tuple]:
# frames_to_xyz = map from LED index to LED position
frames_to_xyz = {}
for i in range(TOTAL_LEDS):
sum_x = 0
sum_z = 0
sum_y = 0
non_nulls = 0
for angle in ANGLES:
radian = np.pi / 180 * angle
center = map_index_to_angle_position[i][angle]
if center is not None:
sum_x += center.x * np.cos(radian)
sum_z += center.x * np.sin(radian)
sum_y += center.y
non_nulls += 1
if non_nulls > 0:
x = 1 / non_nulls * sum_x
z = 1 / non_nulls * sum_z
y = 1 / non_nulls * sum_y
frames_to_xyz[i] = (x, y, z)
else:
frames_to_xyz[i] = None
return frames_to_xyz
def save_to_file(frames_to_xyz: Dict[int, tuple]):
| with open(PATH_SAVE, "w") as file: |
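The body would plausibly write one LED per row to coordinates.csv; the exact layout (index, x, y, z, skipping LEDs with no recovered position) is an assumption:

for i in range(TOTAL_LEDS):
    xyz = frames_to_xyz[i]
    if xyz is None:
        continue  # assumption: unresolved LEDs are skipped
    x, y, z = xyz
    file.write(f"{i},{x},{y},{z}\n")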
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: YYJeffrey/july_server
# Path: app/lib/token.py
def verify_token(token):
def generate_token(user_id):
# Path: app/model/base.py
class BaseModel(db.Model):
def __getitem__(self, key):
def init_on_load(self):
def __set_fields(self):
def _set_fields(self):
def keys(self):
def hide(self, *keys):
def append(self, *keys):
def status(self):
def get_or_404(cls, **kwargs):
def all_or_404(cls, **kwargs):
def get_one(cls, **kwargs):
def get_all(cls, **kwargs):
def create(cls, commit: bool = True, **kwargs):
def update(self, commit: bool = True, **kwargs):
def save(self, commit: bool = True):
def delete(self, commit: bool = True, soft: bool = True):
def get_pagination(cls, not_del: bool = True, **kwargs):
# Path: app/lib/exception.py
class Success(APIException):
code = 200
msg_code = 0
msg = 'Success'
# Path: app/lib/exception.py
class Updated(APIException):
code = 200
msg_code = 2
msg = 'Updated successfully'
# Path: app/lib/red_print.py
class RedPrint(object):
"""
RedPrint is used for nested route registration
"""
def __init__(self, name):
self.name = name
self.mound = []
def route(self, rule, **options):
def decorator(func):
if 'strict_slashes' not in options:
options['strict_slashes'] = False
self.mound.append((func, rule, options))
return func
return decorator
def register(self, bp, url_prefix=None):
if url_prefix is None:
url_prefix = f"/{self.name}"
for func, rule, options in self.mound:
endpoint = f"{self.name}/{options.pop('endpoint', func.__name__)}"
bp.add_url_rule(url_prefix + rule, endpoint, func, **options)
# Path: app/model/message.py
class Message(BaseModel):
"""
Message model
"""
__tablename__ = 'message'
content = Column(String(256), nullable=False, comment='content')
category = Column(Enum(MessageCategory), default=MessageCategory.COMMENT, comment='type')
is_read = Column(Boolean, default=False, comment='whether the message has been read')
is_anon = Column(Boolean, default=False, comment='whether the message is anonymous')
user_id = Column(String(32), nullable=False, index=True, comment='user id')
action_user_id = Column(String(32), nullable=False, index=True, comment='initiating user id')
topic_id = Column(String(32), index=True, comment='topic id')
def __str__(self):
return self.content
def _set_fields(self):
self.append('push_time')
self._exclude.extend(['action_user_id'])
@property
def push_time(self):
"""
发布时间
"""
if self.create_time is not None:
return datetime_to_hint(self.create_time)
return None
# Path: app/service/message.py
def get_message_list():
"""
Query the message list
"""
action_user = aliased(User)
data = db.session.query(Message, User, action_user, Topic) \
.outerjoin(User, Message.user_id == User.id) \
.outerjoin(action_user, Message.action_user_id == action_user.id) \
.outerjoin(Topic, Message.topic_id == Topic.id) \
.filter(Message.user_id == g.user.id) \
.filter(Message.is_read.is_(False)) \
.filter(Message.delete_time.is_(None)) \
.all()
for index, (message, _, message.action_user, message.topic) in enumerate(data):
if message.topic is not None:
if message.topic.is_anon and g.user.id != message.topic.user_id:
message.topic.user = None
else:
message.topic.user = User.get_one(id=message.topic.user_id)
if message.topic.video_id is not None:
message.topic.video = Video.get_one(id=message.topic.video_id)
else:
message.topic.video = None
message.topic.append('user', 'video')
if message.is_anon:
message.action_user = None
message.append('action_user', 'topic')
data[index] = message
return data
# Path: app/api/v2/message.py
from flask import g
from app import auth, db
from app.lib.exception import Success, Updated
from app.lib.red_print import RedPrint
from app.model.message import Message
from app.service.message import get_message_list
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2023 by Jeffrey.
:license: Apache 2.0, see LICENSE for more details.
"""
api = RedPrint('message')
@api.route('/', methods=['GET'])
@auth.login_required
def get_messages():
"""
Get messages
"""
messages = get_message_list()
return Success(data=messages)
@api.route('/read', methods=['POST'])
@auth.login_required
def read_messages():
"""
Mark messages as read
"""
with db.auto_commit():
db.session.query(Message).filter_by(user_id=g.user.id, is_read=False).update({Message.is_read: True})
| return Updated() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: lchen1019/Image_Cropper
# Path: ISAT/annotation.py
class Object:
def __init__(self, category:str, group:int, segmentation, area, layer, bbox, iscrowd=0, note=''):
self.category = category
self.group = group
self.segmentation = segmentation
self.area = area
self.layer = layer
self.bbox = bbox
self.iscrowd = iscrowd
self.note = note
# Path: ISAT/configs.py
class STATUSMode(Enum):
VIEW = 0
CREATE = 1
EDIT = 2
# Path: ISAT/configs.py
class CLICKMode(Enum):
POSITIVE = 0
NEGATIVE = 1
# Path: ISAT/configs.py
class DRAWMode(Enum):
POLYGON = 0
SEGMENTANYTHING = 1
# Path: ISAT/configs.py
class CONTOURMode(Enum):
SAVE_MAX_ONLY = 0 # keep only the mask with the most vertices (usually the largest area)
SAVE_EXTERNAL = 1 # keep only the external contour
SAVE_ALL = 2 # keep all contours
# Path: ISAT/widgets/polygon.py
from PyQt5 import QtCore, QtWidgets, QtGui
from ISAT.annotation import Object
from ISAT.configs import STATUSMode, CLICKMode, DRAWMode, CONTOURMode
import typing
# -*- coding: utf-8 -*-
# @Author : LG
class PromptPoint(QtWidgets.QGraphicsPathItem):
def __init__(self, pos, type=0):
super(PromptPoint, self).__init__()
self.color = QtGui.QColor('#0000FF') if type==0 else QtGui.QColor('#00FF00')
self.color.setAlpha(255)
self.painterpath = QtGui.QPainterPath()
self.painterpath.addEllipse(
QtCore.QRectF(-1, -1, 2, 2))
self.setPath(self.painterpath)
self.setBrush(self.color)
self.setPen(QtGui.QPen(self.color, 3))
self.setZValue(1e5)
self.setPos(pos)
class Vertex(QtWidgets.QGraphicsPathItem):
def __init__(self, polygon, color, nohover_size=2):
super(Vertex, self).__init__()
self.polygon = polygon
self.color = color
self.color.setAlpha(255)
self.nohover_size = nohover_size
self.hover_size = self.nohover_size + 2
self.line_width = 0
self.nohover = QtGui.QPainterPath()
self.nohover.addEllipse(QtCore.QRectF(-self.nohover_size//2, -self.nohover_size//2, self.nohover_size, self.nohover_size))
self.hover = QtGui.QPainterPath()
self.hover.addRect(QtCore.QRectF(-self.nohover_size//2, -self.nohover_size//2, self.nohover_size, self.nohover_size))
self.setPath(self.nohover)
self.setBrush(self.color)
self.setPen(QtGui.QPen(self.color, self.line_width))
self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)
self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemIsMovable, True)
self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemSendsGeometryChanges, True)
self.setAcceptHoverEvents(True)
self.setZValue(1e5)
def setColor(self, color):
self.color = QtGui.QColor(color)
self.color.setAlpha(255)
self.setPen(QtGui.QPen(self.color, self.line_width))
self.setBrush(self.color)
def itemChange(self, change: 'QtWidgets.QGraphicsItem.GraphicsItemChange', value: typing.Any):
if change == QtWidgets.QGraphicsItem.GraphicsItemChange.ItemSelectedHasChanged:
self.scene().mainwindow.actionDelete.setEnabled(self.isSelected())
if self.isSelected():
selected_color = QtGui.QColor('#00A0FF')
self.setBrush(selected_color)
else:
self.setBrush(self.color)
if change == QtWidgets.QGraphicsItem.GraphicsItemChange.ItemPositionChange and self.isEnabled():
# keep vertices from being moved outside the image
if value.x() < 0:
value.setX(0)
if value.x() > self.scene().width()-1:
value.setX(self.scene().width()-1)
if value.y() < 0:
value.setY(0)
if value.y() > self.scene().height()-1:
value.setY(self.scene().height()-1)
index = self.polygon.vertexs.index(self)
self.polygon.movePoint(index, value)
return super(Vertex, self).itemChange(change, value)
def hoverEnterEvent(self, event: 'QGraphicsSceneHoverEvent'):
| if self.scene().mode == STATUSMode.CREATE: # CREATE |
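A hedged sketch of the full hover behavior, based on the nohover/hover painter paths prepared in __init__ (the CREATE-mode branch is an assumption):

def hoverEnterEvent(self, event):
    if self.scene().mode == STATUSMode.CREATE:  # keep the small vertex while drawing
        self.setPath(self.nohover)
    else:
        self.setPath(self.hover)  # enlarge the vertex under the cursor
    super().hoverEnterEvent(event)

def hoverLeaveEvent(self, event):
    self.setPath(self.nohover)
    super().hoverLeaveEvent(event)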
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: aoki-h-jp/crypto-listed-detector
# Path: crypto_listed_detector/fetchapi/binance.py
class BinanceFetch:
_BASE_URL = "https://fapi.binance.com"
def __init__(self):
pass
def get_linear_ticker(self):
url = self._BASE_URL + "/fapi/v1/exchangeInfo"
response = requests.get(url)
return response.json()
def get_all_linear_symbols(self):
return [item["symbol"] for item in self.get_linear_ticker()["symbols"]]
# Path: crypto_listed_detector/fetchapi/bitget.py
class BitgetFetch:
_BASE_URL = "https://api.bitget.com"
def __init__(self):
pass
def get_linear_ticker(self):
url = self._BASE_URL + "/api/v2/mix/market/tickers?productType=USDT-FUTURES"
response = requests.get(url)
return response.json()
def get_all_linear_symbols(self):
return [item["symbol"] for item in self.get_linear_ticker()["data"]]
# Path: crypto_listed_detector/fetchapi/bybit.py
class BybitFetch:
_BASE_URL = "https://api.bybit.com"
def __init__(self):
pass
def get_linear_ticker(self):
url = self._BASE_URL + "/v5/market/tickers?category=linear"
response = requests.get(url)
return response.json()
def get_all_linear_symbols(self):
return [item["symbol"] for item in self.get_linear_ticker()["result"]["list"]]
# Path: crypto_listed_detector/fetchapi/gateio.py
class GateioFetch:
_BASE_URL = "https://api.gateio.ws"
def __init__(self):
pass
def get_contracts(self):
url = self._BASE_URL + "/api/v4/futures/usdt/contracts"
response = requests.get(url)
return response.json()
def get_all_linear_symbols(self):
return [item["name"] for item in self.get_contracts()]
# Path: crypto_listed_detector/fetchapi/kucoin.py
class KucoinFetch:
_BASE_URL = "https://api-futures.kucoin.com"
def __init__(self):
pass
def get_linear_ticker(self):
url = self._BASE_URL + "/api/v1/contracts/active"
response = requests.get(url)
return response.json()
def get_all_linear_symbols(self):
return [item["symbol"] for item in self.get_linear_ticker()["data"]]
# Path: crypto_listed_detector/fetchapi/mexc.py
class MexcFetch:
_BASE_URL = "https://contract.mexc.com"
def __init__(self):
pass
def get_risk_reverse(self):
url = self._BASE_URL + "/api/v1/contract/risk_reverse"
response = requests.get(url)
return response.json()
def get_all_linear_symbols(self):
return [item["symbol"] for item in self.get_risk_reverse()["data"]]
# Path: crypto_listed_detector/fetchapi/okx.py
class OkxFetch:
_BASE_URL = "https://www.okx.com"
def __init__(self):
pass
def get_linear_ticker(self):
url = self._BASE_URL + "/api/v5/public/instruments?instType=SWAP"
response = requests.get(url)
return response.json()
def get_all_linear_symbols(self):
return [item["instId"] for item in self.get_linear_ticker()["data"]]
# Path: crypto_listed_detector/fetchapi/phemex.py
class PhemexFetch:
_BASE_URL = "https://api.phemex.com"
def __init__(self):
pass
def get_linear_products(self):
url = self._BASE_URL + "/public/products"
response = requests.get(url)
return response.json()
def get_all_linear_symbols(self):
return [
item["symbol"] for item in self.get_linear_products()["data"]["products"]
]
# Path: crypto_listed_detector/fetchapi/pionex.py
class PionexFetch:
_BASE_URL = "https://api.pionex.com"
def __init__(self):
pass
def get_linear_symbols(self):
url = self._BASE_URL + "/api/v1/common/symbols"
response = requests.get(url)
return response.json()
def get_all_linear_symbols(self):
return [item["symbol"] for item in self.get_linear_symbols()["data"]["symbols"]]
# Path: crypto_listed_detector/fetchapi/xtcom.py
class XtcomFetch:
_BASE_URL = "https://fapi.xt.com"
def __init__(self):
pass
def get_linear_ticker(self):
url = self._BASE_URL + "/future/market/v1/public/cg/contracts"
response = requests.get(url)
return response.json()
def get_all_linear_symbols(self):
return [item["symbol"] for item in self.get_linear_ticker()]
# Path: crypto_listed_detector/detector.py
import json
from crypto_listed_detector.fetchapi.binance import BinanceFetch
from crypto_listed_detector.fetchapi.bitget import BitgetFetch
from crypto_listed_detector.fetchapi.bybit import BybitFetch
from crypto_listed_detector.fetchapi.gateio import GateioFetch
from crypto_listed_detector.fetchapi.kucoin import KucoinFetch
from crypto_listed_detector.fetchapi.mexc import MexcFetch
from crypto_listed_detector.fetchapi.okx import OkxFetch
from crypto_listed_detector.fetchapi.phemex import PhemexFetch
from crypto_listed_detector.fetchapi.pionex import PionexFetch
from crypto_listed_detector.fetchapi.xtcom import XtcomFetch
"""
crypto-listed-detector
"""
class Detector:
def __init__(self):
"""
Init all fetchers
"""
| self.bybit = BybitFetch() |
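The remaining fetchers imported above would plausibly be initialized the same way (the ordering is a guess):

self.binance = BinanceFetch()
self.bitget = BitgetFetch()
self.gateio = GateioFetch()
self.kucoin = KucoinFetch()
self.mexc = MexcFetch()
self.okx = OkxFetch()
self.phemex = PhemexFetch()
self.pionex = PionexFetch()
self.xtcom = XtcomFetch()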
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: harvestingmoon/StableVisionBot
# Path: backend.py
class BackEnd:
def __init__(self,model_id) -> None:
self.model = None
self.curr_picture = None
self.final_img = None
self.call = {1:False,2:False}
self.model_id = (model_id if model_id else "stabilityai/stable-diffusion-2")
def change_picture(self,array): # picture received from user is a byte array need to convert into image
picture = io.BytesIO(array)
image = Image.open(picture).convert("RGB")
self.curr_picture = image # store it temp
def final_(self,img):
self.final_img = img
def get_final(self):
return self.final_img
def get_picture(self):
return self.curr_picture
def change_model(self,model):
self.model = model
def get_model(self):
return self.model
def get_call(self):
return self.call
def call_engine(self,type):
model_id = self.model_id
call = self.get_call()
device = ("cuda" if torch.cuda.is_available() else "cpu")
if not call[type]:
if True in list(call.values()):
for k,v in call.items():
if v == True:
call[k] = False
if type == 1:
scheduler = DDIMScheduler.from_pretrained(model_id,subfolder = "scheduler")
pipe = StableDiffusionPipeline.from_pretrained(model_id,scheduler= scheduler, torch_dtype = torch.float16)
else:
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id,torch_dtype = torch.float16)
pipe = pipe.to(device)
self.model = pipe
call[type] = True
return self.get_model()
# Path: backend.py
def post_process(image,to_doc = True):
def resize_image(image, max_size):
quality = 95
while True:
with io.BytesIO() as file:
image.save(file, format='JPEG', quality=quality)
size = file.tell() / 1024 # Size in KB
if size <= max_size:
break
quality -= 5 # Decrease quality by 5. You can change it as needed.
if quality < 0:
raise Exception("Cannot reduce image size under the limit without losing too much quality.")
return image
def enforce_ratio(image,max_ratio): # stick to 20; 1
width, height = image.size
ratio = width / height
if ratio > max_ratio:
new_width = height * max_ratio
image = image.resize((int(new_width), height), Image.ANTIALIAS)
elif ratio < 1 / max_ratio:
new_height = width * max_ratio
image = image.resize((width, int(new_height)), Image.ANTIALIAS)
return image
def limit_pixels(image, max_pixels):
width, height = image.size
current_pixels = width * height
if current_pixels > max_pixels:
# Calculate the scale factor
scale_factor = (max_pixels / current_pixels) ** 0.5
new_width = int(width * scale_factor)
new_height = int(height * scale_factor)
image = image.resize((new_width, new_height), Image.ANTIALIAS)
return image
def pil_to_file(image):
file = io.BytesIO()
if to_doc:
image.save(file, format='PDF')
else:
image.save(file, format='JPEG')  # PIL expects 'JPEG'; 'JPG' is not a registered format
file.seek(0)
return file
if not to_doc:
image = resize_image(image, 9 * 1024)
image = enforce_ratio(image,18)
image = limit_pixels(image, 8000)
image = pil_to_file(image)
return image
# Path: bot.py
from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, Update,InlineKeyboardButton,InlineKeyboardMarkup
from telegram.ext import (
Application,
CommandHandler,
ContextTypes,
ConversationHandler,
MessageHandler,
CallbackQueryHandler,
filters,
CallbackContext,
)
from backend import BackEnd,post_process
from PIL import Image
import numpy as np
import json
import logging
import yaml
import emoji
import asyncio
# Simple telegram bot that uses stable diffusion
''' Importing YAML'''
with open("config .yaml", "r") as f:
config = yaml.safe_load(f)
model = config['model']
api_key = config['API_KEY']
''' States for bot'''
ONE,TWO,DOCUMENT,PHOTO = range(4)
START,T2IMG,T2IMG2,IMG2IMG,IMG2IMG2,OUTPUT= range(6)
''' User logging'''
logging.basicConfig(
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level = logging.INFO
)
logger = logging.getLogger(__name__)
''' Important pipeline for stable diffusion'''
| engine = BackEnd(model) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: khabbazan/Mattermost-Subscriptions
# Path: apps/chat/gql/types.py
class MessageQueryType(graphene.ObjectType):
"""
GraphQL type representing a message in a chat system.
"""
id = graphene.String(description="Unique identifier of the message.")
def resolve_id(root, info):
"""Resolve the message ID."""
return root["id"]
message = graphene.String(description="Content of the message.")
def resolve_message(root, info):
"""Resolve the message content, with special handling for system messages."""
if root["type"] == "system_join_team":
return "Welcome"
return root["message"]
create_at = graphene.String(description="Timestamp when the message was created.")
def resolve_create_at(root, info):
"""Resolve the creation timestamp of the message."""
return root["create_at"]
owner = graphene.Field(UserQueryType, description="User who sent the message.")
def resolve_owner(root, info):
"""Resolve the owner (sender) of the message."""
if isinstance(info.context, WSGIRequest) or isinstance(info.context, ASGIRequest):
return User.objects.filter(username=root["username"]).first()
else:
return User.objects.filter(username=root["username"]).afirst()
type = graphene.String(description="Type of the message, e.g., 'text', 'image', 'system_join_team'.")
def resolve_type(root, info):
"""Resolve the type of the message."""
return root["type"]
# Path: helpers/channels_graphql_ws/subscription.py
LOG = logging.getLogger(__name__)
class Subscription(graphene.ObjectType):
class SubscriptionOptions(graphene.types.objecttype.ObjectTypeOptions):
def broadcast(cls, *, group=None, payload=None):
async def broadcast_async(cls, *, group=None, payload=None):
def broadcast_sync(cls, *, group=None, payload=None):
def unsubscribe(cls, *, group=None):
async def unsubscribe_async(cls, *, group=None):
def unsubscribe_sync(cls, *, group=None):
def Field(cls, name=None, description=None, deprecation_reason=None, required=False): # noqa
def __init_subclass_with_meta__(
cls,
subscribe=None,
publish=None,
unsubscribed=None,
output=None,
arguments=None,
_meta=None,
**options,
): # pylint: disable=arguments-renamed
def _group_name(cls, group=None):
def _channel_layer(cls):
# Path: apps/chat/gql/subscriptions.py
import graphene
from apps.chat.gql.types import MessageQueryType
from helpers.channels_graphql_ws import subscription
class OnNewChatMessage(subscription.Subscription):
"""
GraphQL Subscription for new chat messages.
This subscription allows clients to listen for new messages on a specified channel.
"""
channel_identifier = graphene.String()
| message = graphene.Field(MessageQueryType) |
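Given the subscribe/publish hooks on the Subscription base class shown above, a hedged sketch of how this subscription might route messages per channel (the group naming and payload shape are assumptions):

class Arguments:
    channel_identifier = graphene.String()

@staticmethod
def subscribe(root, info, channel_identifier):
    # assumed convention: one broadcast group per chat channel
    return [channel_identifier]

@staticmethod
def publish(payload, info, channel_identifier):
    return OnNewChatMessage(channel_identifier=channel_identifier, message=payload)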
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Hatins/DEOE
# Path: models/detection/yolox/models/network_blocks.py
class BaseConv(nn.Module):
"""A Conv2d -> Batchnorm -> silu/leaky relu block"""
def __init__(
self, in_channels, out_channels, ksize, stride, groups=1, bias=False, act="silu"
):
super().__init__()
# same padding
pad = (ksize - 1) // 2
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=ksize,
stride=stride,
padding=pad,
groups=groups,
bias=bias,
)
self.bn = nn.BatchNorm2d(out_channels)
self.act = get_activation(act, inplace=True)
def forward(self, x):
return self.act(self.bn(self.conv(x)))
def fuseforward(self, x):
return self.act(self.conv(x))
# Path: models/detection/yolox/models/network_blocks.py
class CSPLayer(nn.Module):
"""C3 in yolov5, CSP Bottleneck with 3 convolutions"""
def __init__(
self,
in_channels,
out_channels,
n=1,
shortcut=True,
expansion=0.5,
depthwise=False,
act="silu",
):
"""
Args:
in_channels (int): input channels.
out_channels (int): output channels.
n (int): number of Bottlenecks. Default value: 1.
"""
# ch_in, ch_out, number, shortcut, groups, expansion
super().__init__()
hidden_channels = int(out_channels * expansion) # hidden channels
self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)
self.conv2 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)
self.conv3 = BaseConv(2 * hidden_channels, out_channels, 1, stride=1, act=act)
module_list = [
Bottleneck(
hidden_channels, hidden_channels, shortcut, 1.0, depthwise, act=act
)
for _ in range(n)
]
self.m = nn.Sequential(*module_list)
def forward(self, x):
x_1 = self.conv1(x)
x_2 = self.conv2(x)
x_1 = self.m(x_1)
x = torch.cat((x_1, x_2), dim=1)
return self.conv3(x)
# Path: models/detection/yolox/models/network_blocks.py
class DWConv(nn.Module):
"""Depthwise Conv + Conv"""
def __init__(self, in_channels, out_channels, ksize, stride=1, act="silu"):
super().__init__()
self.dconv = BaseConv(
in_channels,
in_channels,
ksize=ksize,
stride=stride,
groups=in_channels,
act=act,
)
self.pconv = BaseConv(
in_channels, out_channels, ksize=1, stride=1, groups=1, act=act
)
def forward(self, x):
x = self.dconv(x)
return self.pconv(x)
# Path: data/utils/types.py
class DataType(Enum):
class DatasetType(Enum):
class DatasetMode(Enum):
class DatasetSamplingMode(StrEnum):
class ObjDetOutput(Enum):
EV_REPR = auto()
FLOW = auto()
IMAGE = auto()
OBJLABELS = auto()
OBJLABELS_SEQ = auto()
IS_PADDED_MASK = auto()
IS_FIRST_SAMPLE = auto()
TOKEN_MASK = auto()
GEN1 = auto()
GEN4 = auto()
TRAIN = auto()
VALIDATION = auto()
TESTING = auto()
RANDOM = 'random'
STREAM = 'stream'
MIXED = 'mixed'
LABELS_PROPH = auto()
PRED_PROPH = auto()
EV_REPR = auto()
SKIP_VIZ = auto()
# Path: models/detection/yolox_extension/models/yolo_pafpn.py
from typing import Dict, Optional, Tuple
from torch import compile as th_compile
from ...yolox.models.network_blocks import BaseConv, CSPLayer, DWConv
from data.utils.types import BackboneFeatures
import torch as th
import torch.nn as nn
"""
Original Yolox PAFPN code with slight modifications
"""
try:
except ImportError:
th_compile = None
class YOLOPAFPN(nn.Module):
"""
Removed the direct dependency on the backbone.
"""
def __init__(
self,
depth: float = 1.0,
in_stages: Tuple[int, ...] = (2, 3, 4),
in_channels: Tuple[int, ...] = (256, 512, 1024),
depthwise: bool = False,
act: str = "silu",
compile_cfg: Optional[Dict] = None,
):
super().__init__()
assert len(in_stages) == len(in_channels)
assert len(in_channels) == 3, 'Current implementation only for 3 feature maps'
self.in_features = in_stages
self.in_channels = in_channels
Conv = DWConv if depthwise else BaseConv
###### Compile if requested ######
if compile_cfg is not None:
compile_mdl = compile_cfg['enable']
if compile_mdl and th_compile is not None:
self.forward = th_compile(self.forward, **compile_cfg['args'])
elif compile_mdl:
print('Could not compile PAFPN because torch.compile is not available')
##################################
self.upsample = lambda x: nn.functional.interpolate(x, scale_factor=2, mode='nearest-exact')
self.lateral_conv0 = BaseConv(
in_channels[2], in_channels[1], 1, 1, act=act
)
| self.C3_p4 = CSPLayer( |
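The constructor arguments that plausibly follow mirror the upstream YOLOX PAFPN — an assumption about this fork, not a confirmed reading of it:

int(2 * in_channels[1]),
in_channels[1],
round(3 * depth),
False,
depthwise=depthwise,
act=act,
)  # cat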
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: yeyingdege/ctr-din-pytorch
# Path: din/embedding.py
class EmbeddingLayer(nn.Module):
def __init__(self, num_emb, embedding_dim):
super(EmbeddingLayer, self).__init__()
self.embeddings = nn.Embedding(num_emb, embedding_dim)
nn.init.xavier_uniform_(self.embeddings.weight)
def forward(self, batch_cat):
batch_embedding = self.embeddings(batch_cat)
return batch_embedding
# Path: din/fc.py
class FCLayer(nn.Module):
def __init__(self, input_size,
hidden_size,
bias,
batch_norm=False,
dropout_rate=0.,
activation='relu',
use_sigmoid=False,
dice_dim=2):
super(FCLayer, self).__init__()
self.use_sigmoid = use_sigmoid
layers = []
if batch_norm:
layers.append(nn.BatchNorm1d(input_size))
# FC -> activation -> dropout
layers.append(nn.Linear(input_size, hidden_size, bias=bias))
if activation.lower() == 'relu':
layers.append(nn.ReLU(inplace=True))
elif activation.lower() == 'dice':
assert dice_dim
layers.append(Dice(hidden_size, dim=dice_dim))
elif activation.lower() == 'prelu':
layers.append(nn.PReLU())
else: # None
pass
layers.append(nn.Dropout(p=dropout_rate))
self.fc = nn.Sequential(*layers)
if self.use_sigmoid:
self.output_layer = nn.Sigmoid()
# weight initialization xavier_normal (or glorot_normal in keras, tf)
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight, gain=1.0)
if m.bias is not None:
nn.init.zeros_(m.bias)
pass
def forward(self, x):
return self.output_layer(self.fc(x)) if self.use_sigmoid else self.fc(x)
# Path: din/attention.py
class DinAttentionLayer(nn.Module):
def __init__(self, embedding_dim=36):
super(DinAttentionLayer, self).__init__()
self.local_att = LocalActivationUnit(hidden_size=[80, 40, 1],
bias=[True, True, True],
embedding_dim=embedding_dim,
batch_norm=False)
def forward(self, query_ad, user_behavior, user_behavior_length):
# query ad : batch_size * embedding_size
# user behavior : batch_size * time_seq_len * embedding_size
# user behavior length: batch_size * time_seq_len
# output : batch_size * 1 * embedding_size
attention_score = self.local_att(query_ad, user_behavior) # [128, 100, 1]
attention_score = torch.transpose(attention_score, 1, 2) # B * 1 * T
# define mask by length
user_behavior_length = user_behavior_length.type(torch.LongTensor)
mask = torch.arange(user_behavior.size(1))[None, :] < user_behavior_length[:, None]
# mask
score = torch.mul(attention_score, mask.type(torch.cuda.FloatTensor)) # batch_size *
score = F.softmax(score, dim=-1)
# multiply weight
output = torch.matmul(score, user_behavior)
return output
# Path: din/model.py
import torch
import torch.nn as nn
from torch.nn import functional as F
from .embedding import EmbeddingLayer
from .fc import FCLayer
from .attention import DinAttentionLayer
class DeepInterestNetwork(nn.Module):
def __init__(self, n_uid, n_mid, n_cat, EMBEDDING_DIM, HIDDEN_DIM=[162,200,80,2]):
super(DeepInterestNetwork, self).__init__()
self.embedding_dim = EMBEDDING_DIM
self.hid_dim = HIDDEN_DIM
# embeddings
self.uid_embeddings = EmbeddingLayer(n_uid, self.embedding_dim)
self.mid_embeddings = EmbeddingLayer(n_mid, self.embedding_dim)
self.cat_embeddings = EmbeddingLayer(n_cat, self.embedding_dim)
self.attn = DinAttentionLayer(embedding_dim=self.embedding_dim*2)
mlp_input_dim = self.embedding_dim * 9
self.mlp = nn.Sequential(
| FCLayer(mlp_input_dim, hidden_size=self.hid_dim[1], bias=True, batch_norm=True, activation='dice'), |
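With self.hid_dim = [162, 200, 80, 2], the remaining layers plausibly step down through the hidden sizes to a two-way output; the batch_norm and activation choices here are assumptions:

FCLayer(self.hid_dim[1], hidden_size=self.hid_dim[2], bias=True, batch_norm=False, activation='dice'),
FCLayer(self.hid_dim[2], hidden_size=self.hid_dim[3], bias=True, batch_norm=False, activation='none'),
)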
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: iamlooper/VIC-TG-Bot
# Path: app/config.py
class _Config:
class CMD:
def __init__(self, func, path, doc):
def __init__(self):
def __str__(self):
# Path: app/core/client/conversation.py
class Conversation:
CONVO_DICT: dict[int, "Conversation"] = {}
class DuplicateConvo(Exception):
def __init__(self, chat: str | int):
super().__init__(f"Conversation already started with {chat} ")
def __init__(
self, chat_id: int | str, filters: Filter | None = None, timeout: int = 10
):
self.chat_id = chat_id
self.filters = filters
self.timeout = timeout
self.responses: list = []
self.set_future()
from app import bot
self._client = bot
def __str__(self):
return json.dumps(self.__dict__, indent=4, ensure_ascii=False, default=str)
def set_future(self, *args, **kwargs):
future = asyncio.Future()
future.add_done_callback(self.set_future)
self.response = future
async def get_response(self, timeout: int | None = None) -> Message | None:
try:
resp_future: asyncio.Future = await asyncio.wait_for(
self.response, timeout=timeout or self.timeout
)
return resp_future
except asyncio.TimeoutError:
raise TimeoutError("Conversation Timeout")
async def send_message(
self,
text: str,
timeout=0,
get_response=False,
**kwargs,
) -> Message | tuple[Message, Message]:
message = await self._client.send_message(
chat_id=self.chat_id, text=text, **kwargs
)
if get_response:
response = await self.get_response(timeout=timeout or self.timeout)
return message, response
return message
async def send_document(
self,
document,
caption="",
timeout=0,
get_response=False,
**kwargs,
) -> Message | tuple[Message, Message]:
message = await self._client.send_document(
chat_id=self.chat_id,
document=document,
caption=caption,
force_document=True,
**kwargs,
)
if get_response:
response = await self.get_response(timeout=timeout or self.timeout)
return message, response
return message
async def __aenter__(self) -> "Conversation":
if isinstance(self.chat_id, str):
self.chat_id = (await self._client.get_chat(self.chat_id)).id
if (
self.chat_id in Conversation.CONVO_DICT.keys()
and Conversation.CONVO_DICT[self.chat_id].filters == self.filters
):
raise self.DuplicateConvo(self.chat_id)
Conversation.CONVO_DICT[self.chat_id] = self
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
Conversation.CONVO_DICT.pop(self.chat_id, None)
if not self.response.done():
self.response.cancel()
# Path: app/core/client/filters.py
from pyrogram import filters as _filters
from pyrogram.types import Message
from app import Config
from app.core.client.conversation import Conversation
# Overall BOT filters
convo_filter = _filters.create(
lambda _, __, message: (message.chat.id in Conversation.CONVO_DICT.keys())
and (not message.reactions)
)
def cmd_check(message: Message, trigger: str) -> bool:
start_str = message.text.split(maxsplit=1)[0]
cmd = start_str.replace(trigger, "", 1)
| return bool(cmd in Config.CMD_DICT.keys()) |
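A hedged usage sketch of cmd_check inside a pyrogram filter; Config.TRIGGER is a hypothetical attribute used only for illustration:

cmd_filter = _filters.create(
    lambda _, __, m: bool(m.text)
    and m.text.startswith(Config.TRIGGER)  # hypothetical trigger prefix
    and cmd_check(m, Config.TRIGGER)
)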
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Enthusiasm23/primkit
# Path: src/primkit/config.py
LOG_LEVEL = os.environ.get('LOG_LEVEL', 'INFO') # log level
# Path: src/primkit/config.py
LOG_FILE = os.environ.get('LOG_FILE', None) # log file path
# Path: src/primkit/config.py
LOG_FORMAT = os.environ.get('LOG_FORMAT', '%(asctime)s - %(name)s - %(levelname)s - %(message)s') # log format
# Path: src/primkit/config.py
LOG_FILE_MODE = os.environ.get('LOG_FILE_MODE', 'a') # log file mode
# Path: src/primkit/config.py
MAX_LOG_SIZE = int(os.environ.get('MAX_LOG_SIZE', 10485760)) # maximum log file size (10 MB)
# Path: src/primkit/config.py
BACKUP_COUNT = int(os.environ.get('BACKUP_COUNT', 3)) # number of backup log files to keep
# Path: src/primkit/config.py
LOG_STREAM = os.environ.get('LOG_STREAM', 'True').lower() in ('true', '1', 't') # whether to log to the console
# Path: src/primkit/utils/LoggerSetup.py
import logging
import logging.handlers
from ..config import LOG_LEVEL, LOG_FILE, LOG_FORMAT, \
LOG_FILE_MODE, MAX_LOG_SIZE, BACKUP_COUNT, LOG_STREAM
def setup_logging(
level=None,
log_file=None,
format=None,
log_file_mode=None,
max_log_size=None,
backup_count=None,
stream=None
):
"""
Configure logging for the application.
:param level: The logging level, e.g., 'DEBUG', 'INFO', 'WARNING'. Defaults to value from config.py but can be overridden by user input.
:param log_file: Path to the log file. If specified, logs will be written to the file. Defaults to value from config.py but can be overridden by user input.
:param format: The format for the logging messages. Defaults to value from config.py but can be overridden by user input.
:param log_file_mode: The mode for writing to the log file, e.g., 'a' for append mode. Defaults to value from config.py but can be overridden by user input.
:param max_log_size: The maximum size of the log file in bytes. When exceeded, the log will rotate. Defaults to value from config.py but can be overridden by user input.
:param backup_count: The number of backup log files to keep. Defaults to value from config.py but can be overridden by user input.
:param stream: Whether to output logs to the console. Defaults to value from config.py but can be overridden by user input.
The function uses the default configuration or configuration provided by the user. Logging can be directed to a file, console, or both based on parameters.
"""
# Use the default configuration or user-provided configuration
if level is not None:
if isinstance(level, int):
log_level = level
else:
log_level = getattr(logging, level.upper(), logging.INFO)
else:
if isinstance(LOG_LEVEL, int):
log_level = LOG_LEVEL
else:
log_level = getattr(logging, LOG_LEVEL.upper(), logging.INFO)
log_file = log_file if log_file is not None else LOG_FILE
format = format if format is not None else LOG_FORMAT
| log_file_mode = log_file_mode if log_file_mode is not None else LOG_FILE_MODE |
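The remaining parameters presumably fall back to their config defaults the same way:

max_log_size = max_log_size if max_log_size is not None else MAX_LOG_SIZE
backup_count = backup_count if backup_count is not None else BACKUP_COUNT
stream = stream if stream is not None else LOG_STREAM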
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Wangyuhao06/2022-adhoc
# Path: pymobility/models/mobility.py
def random_waypoint(*args, **kwargs):
return iter(RandomWaypoint(*args, **kwargs))
# Path: src/node.py
class Node(object):
def __init__(self,id_node):
super(Node, self).__init__()
#multi-agent sys setting
self.node_max=36
self.act_range=self.node_max-1 #最大邻居范围
# current agent-property setting
self.id=id_node#该节点id
# 1 - packets
self.packets_ToSend_id=[]#该节点当前待传的包
self.packets_id_list=[]#该节点至今为止保存过的包id
self.sending_flag=0
self.rec_flag=0
self.trans_task_send=Queue(maxsize=1)#该节点当前传输的任务
self.trans_taskID_rec=[]#该节点当前接收的任务
# 2 - energy
self.current_amp_send=0#节点当前发送增益--------动作
#self.current_amp_receive=0#节点当前接收增益--------动作
self.current_power_send=0#节点当前发送功率
self.current_power_receive=0#节点当前接收功率
self.power_list=[]#节点使用能量记录
self.energy_consumption=0#截至现在能量消耗
# 3 - freq
self.current_freqB=[1]#当前选用频谱块--------动作
self.freqB_list=[1]#频谱块历史
# 4 - topology
self.neibor_idlist=[]
self.next_hop_id=-1#下一条节点id--------动作
# 5 - observation
#self.ob_send=[]
# def observation_rec(self,send_node):
# if len(self.ob_send)==0 or len(send_node.ob_send)==0 :
# raise ValueError("send observation unfinished")
# self.ob_rec.append(self.ob_send[-1])
# self.ob_rec.append(send_node.ob_send[-1])
# return self.ob_rec
def get_send_action(self,ob,action_space):
### default decision ###
# change attributes
return self.current_amp_send,self.current_freqB,self.next_hop_id
def get_rec_action(self,ob):
### default decision ###
# change attributes
return self.current_amp_receive
# Path: src/packet.py
class Packet(object):
def __init__(self,id_packet,packet_size,ori_node_id,dst_node_id,time_start_0):
super(Packet, self).__init__()
self.id=id_packet
self.size=packet_size
# node attributes
self.ori_node_id=ori_node_id
self.cur_node_id=ori_node_id
self.dst_node_id=dst_node_id
self.node_list=[ori_node_id]
# trans-task attributes
self.cur_trans_task_id=-100
self.in_TR=0
self.trans_task_IDlist=[]
# routing attributes
self.time_start=time_start_0
self.time_use=0
self.arrive_flag=0
def packet_trans_update(self,trans_task):
if trans_task.trans_property[2]!=self.id:
raise ValueError('trans_task not matched')
self.cur_trans_task_id=trans_task.id
# Path: src/transtask.py
class Trans_task(object):
def __init__(self,trans_id,node_send,node_rec,packet):
self.id=trans_id
self.trans_property=(node_send.id,node_rec.id,packet.id) # basic properties
self.packsize=packet.size
####frequency block info####
self.FreqB_occup=node_send.current_freqB # ids of occupied spectrum blocks
####SINR and Capacity####
self.SNR_C=([],1)#Y(SNR,Capacity)-----------------[X(timeslot1:SNR,Capacity),(timeslot2:SNR,Capacity),...]
####time of trans####
self.time_use=1#int(self.packsize/self.SNR_C[1])+1
self.time_cnt=0
self.finish_flag=0
####energy setting####
self.energy_property = (node_send.current_amp_send,RECAMP)
self.energy_consume=(node_send.current_amp_send*packet.size*PACKENERGY,RECAMP*packet.size*PACKENERGY)
self.power_consume=(round(node_send.current_amp_send*packet.size*PACKENERGY/self.time_use,6),round(RECAMP*packet.size*PACKENERGY/self.time_use,6))
def show_info(self):
return self.trans_property[0],self.trans_property[1],self.trans_property[2]
def Trans_task_update(self):
if self.finish_flag:
return 1
if self.time_cnt>=self.time_use:
self.finish_flag=1
return 1
elif self.time_cnt<self.time_use:
self.time_cnt+=1
return 0
#trans_task=tuple([],{},(node_send_id,node_send_amp,node_rec_id,node_rec_amp,packet_id),0)
#tuple:([占用频谱块id],{(timeslot1:SNR,Capacity),(timeslot2:SNR,Capacity),...},(基本属性:发送节点id,发送增益,接收节点id,接收增益,包id),完成标志位)
# Path: src/env.py
import random
import numpy as np
from math import log2, log10
from queue import Queue
from pymobility.models.mobility import random_waypoint
from src.node import Node
from src.packet import Packet
from src.parameter import *
from src.transtask import Trans_task
class Environment():
# initialize the environment
def __init__(self):
# initial data - maximum number of nodes
self.node_max=NODE_MAX
self.node_space_size=NODE_MAX
self.node_moving_area=MOV_AREA
        # initialize the 2D plane
| self.geo_area = random_waypoint(self.node_max, dimensions=(MOV_AREA, MOV_AREA), velocity=(10, 15), wt_max=1.0) |
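
For reference, the pymobility random_waypoint call predicted above returns an infinite generator; assuming the package's documented interface, each next() yields an (n_nodes, 2) numpy array of updated positions. A minimal sketch (the 1000x1000 area stands in for the unknown MOV_AREA constant):

from pymobility.models.mobility import random_waypoint

# 36 nodes moving in a 1000x1000 area at speeds between 10 and 15
model = random_waypoint(36, dimensions=(1000, 1000), velocity=(10, 15), wt_max=1.0)
for step in range(3):
    positions = next(model)    # numpy array of shape (36, 2)
    print(step, positions[0])  # position of node 0 at this time step
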
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: karthicksivakumarp/gui_read_csv
# Path: read_from_csv/read_csv_file.py
class read_csv_data:
def __init__(self):
def read_mult_csv_file(self):
# Path: data_analysis/analyze_data.py
class analyze_csv_data:
def __init__(self):
def pass_data_frame(self, df_list, csv_filepaths, columns):
def analyze_data_all(self):
def analyze_data_single_csv(self, index):
# Path: report_generation/generate_report.py
class generate_report:
def __init__(self):
"""
Constructor for the generate_report class.
Initializes instance variables to store analysis data.
        Customize this file to fit your report-generation needs.
"""
# Initialize instance variables to store analysis data
self.analysis_data_1 = None
self.analysis_data_2 = None
self.analysis_data_3 = None
def generate_report(self, data1, data2, data3):
"""
Method to generate a report by assigning analysis data to instance variables.
Parameters:
- data1: The first set of analysis data.
- data2: The second set of analysis data.
- data3: The third set of analysis data.
"""
# Assign data1 to analysis_data_1
self.analysis_data_1 = data1
# Assign data2 to analysis_data_2
self.analysis_data_2 = data2
# Assign data3 to analysis_data_3
self.analysis_data_3 = data3
# Print analysis_data_1
print("Analysis Data 1:")
print(self.analysis_data_1)
# Print analysis_data_2
print("Analysis Data 2:")
print(self.analysis_data_2)
# Print analysis_data_3
print("Analysis Data 3:")
print(self.analysis_data_3)
# Path: user_interface/gui.py
class UI(Frame):
def __init__(self, root, ui_read_csv, ui_data_analysis, ui_report_gen):
def set_status_message(self, message):
def init_menu_bar(self):
def config_frame(self):
def top_left_frame(self):
def bottom_left_frame(self):
def right_frame(self):
def read_csv_files(self):
def on_listbox_select(self, event):
def analyze_csv_files(self):
def analyze_all_csv_files(self):
def generate_report_single(self):
def generate_report_all(self):
# Path: main.py
from read_from_csv import read_csv_file
from data_analysis import analyze_data
from report_generation import generate_report
from tkinter import Tk
from user_interface import gui
# Import necessary modules
# Initialize CSV reader instance
read_csv = read_csv_file.read_csv_data()
# Obtain the function/method for reading multiple CSV files
# Note: "read_mult_csv_file" is a function or method defined in the "read_csv_file" module
main_read_csv = read_csv.read_mult_csv_file
# Initialize data analyzer instance
analyze_data = analyze_data.analyze_csv_data()
# Initialize report generator instance
report_gen = generate_report.generate_report()
# Create the main Tkinter window
root = Tk()
root.title('Csv DataAnalyzer') # Set the title of the Tkinter window
root.geometry("800x600") # Set the initial dimensions of the Tkinter window
# Create the user interface (GUI) using the UI class from the "user_interface" module
# Pass the necessary components (main_read_csv, analyze_data, report_gen) to the GUI
| gui.UI(root, main_read_csv, analyze_data, report_gen)
|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Slenderman00/Ask-Surf
# Path: AskSurf/settings.py
def load_settings():
# check if settings.toml exists
if not settings_exist():
create_settings()
edit_settings()
return load_settings()
with open(own_dir / "settings.toml", "r") as f:
settings = toml.load(f)
return settings
# Path: AskSurf/settings.py
def settings_exist():
return (own_dir / "settings.toml").exists()
# Path: AskSurf/settings.py
def edit_settings():
os.system(f"{select_editor()} {own_dir / 'settings.toml'}")
# Path: AskSurf/cli.py
import os
import requests
import argparse
import tqdm
import time
import subprocess
import sys
from pathlib import Path
from halo import Halo
from .settings import load_settings, settings_exist, edit_settings
settings = {}
own_dir = Path(__file__).parent.absolute()
question_pipe = own_dir / "question_pipe"
response_pipe = own_dir / "response_pipe"
def conditional_decorator(dec, condition):
def decorator(func):
if not condition:
# Return the function unchanged, not decorated.
return func
return dec(func)
return decorator
def parse_message(message):
# replace the tags with the correct color codes
message = message.replace("[RED]", "\033[31m")
message = message.replace("[YELLOW]", "\033[33m")
message = message.replace("[ORANGE]", "\033[33m")
message = message.replace("[GREEN]", "\033[32m")
message = message.replace("[PURPLE]", "\033[35m")
message = message.replace("[BLUE]", "\033[34m")
message = message.replace("[NORMAL]", "\033[0m")
# replace all end tags with the normal color code
message = message.replace("[/RED]", "\033[0m")
message = message.replace("[/YELLOW]", "\033[0m")
message = message.replace("[/ORANGE]", "\033[0m")
message = message.replace("[/GREEN]", "\033[0m")
message = message.replace("[/PURPLE]", "\033[0m")
message = message.replace("[/BLUE]", "\033[0m")
message = message.replace("[/NORMAL]", "\033[0m")
return message
def init():
if not model_exists():
print("Please select a model")
download_model(select_model())
if not settings_exist():
print("Please make sure the settings are correct")
settings = load_settings()
exit(1)
def main():
"""Main entry point for the application"""
init()
# parse the arguments
parser = argparse.ArgumentParser(description="AskSurf CLI")
parser.add_argument(
"question",
nargs=argparse.REMAINDER,
help="The question to ask Dolphin",
)
parser.add_argument(
"--model",
"-m",
action="store_true",
help="The model to use",
)
parser.add_argument(
"--delete",
"-d",
action="store_true",
help="Delete the model",
)
parser.add_argument(
"--kill",
"-k",
action="store_true",
help="Kill the Dolphin service",
)
parser.add_argument(
"--settings",
"-s",
action="store_true",
help="Edit the settings",
)
args = parser.parse_args()
if args.model:
download_model(select_model())
return
if args.delete:
delete_model()
return
if args.kill:
os.system("pkill -f dolphin_service.py")
return
if args.settings:
| edit_settings() |
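
A note on the question argument above: nargs=argparse.REMAINDER collects every remaining command-line token into a list, which is what lets the CLI accept a free-form question. A self-contained illustration:

import argparse

parser = argparse.ArgumentParser(description="AskSurf CLI")
parser.add_argument("question", nargs=argparse.REMAINDER, help="The question to ask")
parser.add_argument("--kill", "-k", action="store_true")

args = parser.parse_args(["how", "do", "I", "list", "files"])
print(" ".join(args.question))  # -> how do I list files
print(args.kill)                # -> False
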
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: davidsvy/fractal_video
# Path: src/utils/data.py
def dataset_stats(root, ext):
n_train = len(find_files(dir=os.path.join(root, 'train'), ext=ext))
n_val = len(find_files(dir=os.path.join(root, 'val'), ext=ext))
n_test = len(find_files(dir=os.path.join(root, 'test'), ext=ext))
print(f'train -> {n_train} files')
print(f'val -> {n_val} files')
print(f'test -> {n_test} files')
# Path: src/utils/other.py
def run_bash(command):
return subprocess.run(command, shell=True, capture_output=True, text=True)
# Path: src/prepare_data/diving48.py
import json
import os
import shutil
from ..utils.data import dataset_stats
from ..utils.other import run_bash
def move_files(path_split, dir_src, dir_tgt, ext):
with open(path_split, 'r') as file:
lut = json.load(file)
for item in lut:
filename = f'{item["vid_name"]}.{ext}'
path_src = os.path.join(dir_src, filename)
label = str(item['label'])
dir_label = os.path.join(dir_tgt, label)
path_tgt = os.path.join(dir_label, filename)
os.makedirs(dir_label, exist_ok=True)
shutil.move(path_src, path_tgt)
def diving48(root):
"""
train -> 15943 files
val -> 2096 files
"""
url_data = 'http://www.svcl.ucsd.edu/projects/resound/Diving48_rgb.tar.gz'
url_split_train = 'http://www.svcl.ucsd.edu/projects/resound/Diving48_train.json'
url_split_val = 'http://www.svcl.ucsd.edu/projects/resound/Diving48_test.json'
path_data = os.path.join(root, os.path.basename(url_data))
path_split_train = os.path.join(root, os.path.basename(url_split_train))
path_split_val = os.path.join(root, os.path.basename(url_split_val))
dir_src = os.path.join(root, 'rgb')
dir_train = os.path.join(root, 'train')
dir_val = os.path.join(root, 'val')
ext = 'mp4'
os.makedirs(dir_train, exist_ok=True)
os.makedirs(dir_val, exist_ok=True)
print('\nDownloading DIVING48...')
run_bash(f'wget {url_split_train} -P {root}')
run_bash(f'wget {url_split_val} -P {root}')
run_bash(f'wget {url_data} -P {root}')
print('Extracting DIVING48...')
run_bash(f'tar -xf {path_data} -C {root}')
os.remove(path_data)
move_files(
path_split=path_split_train, dir_src=dir_src,
dir_tgt=dir_train, ext=ext
)
move_files(
path_split=path_split_val, dir_src=dir_src,
dir_tgt=dir_val, ext=ext
)
shutil.rmtree(dir_src)
os.remove(path_split_train)
os.remove(path_split_val)
| dataset_stats(root=root, ext=ext) |
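
find_files is referenced by dataset_stats but not shown in this excerpt; a plausible implementation, hypothetical and inferred only from how dataset_stats calls it, is a recursive glob:

import glob
import os

def find_files(dir, ext):
    # Recursively collect all files under dir with the given extension.
    return glob.glob(os.path.join(dir, "**", f"*.{ext}"), recursive=True)

print(len(find_files(dir=".", ext="mp4")))
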
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: OpenBrickProtocolFoundation/client
# Path: tetrion.py
class Event(NamedTuple):
key: Key
type: EventType
frame: int
# Path: tetrion.py
class EventType(Enum):
PRESSED = 0
RELEASED = 1
# Path: tetrion.py
class Key(Enum):
LEFT = 0
RIGHT = 1
DROP = 2
# Path: tetrion.py
class Tetrion:
def __init__(self) -> None:
self._tetrion = _create_tetrion()
def try_get_active_tetromino(self) -> Optional[Tetromino]:
return _tetrion_try_get_active_tetromino(self._tetrion)
def simulate_up_until(self, frame: int) -> None:
_tetrion_simulate_up_until(self._tetrion, frame)
def enqueue_event(self, event: Event) -> None:
_tetrion_enqueue_event(self._tetrion, event)
def matrix(self) -> Matrix:
matrix = _tetrion_matrix(self._tetrion)
minos: list[TetrominoType] = []
for y in range(self.height):
for x in range(self.width):
minos.append(_matrix_get(matrix, Vec2(x, y)))
return Matrix(minos, self.width)
@cached_property
def width(self) -> int:
return _tetrion_width()
@cached_property
def height(self) -> int:
return _tetrion_height()
def __enter__(self) -> Self:
return self
def __exit__(self, exc_type: type[BaseException], exc_val: BaseException, exc_tb: types.TracebackType) -> bool:
self.__del__()
return exc_type is None
def __del__(self) -> None:
if self._tetrion is not None:
_destroy_tetrion(self._tetrion)
self._tetrion = None
# Path: main.py
import pygame
from tetrion import Event
from tetrion import EventType
from tetrion import Key
from tetrion import Tetrion
def main() -> None:
frame = 0
with Tetrion() as tetrion:
pygame.init()
RECT_SIZE = 30
size = (RECT_SIZE * tetrion.width, (RECT_SIZE + 2) * tetrion.height)
screen = pygame.display.set_mode(size)
COLORS = [(0, 0, 0),
(0, 240, 240),
(0, 0, 240),
(240, 160, 0),
(240, 240, 0),
(0, 240, 0),
(160, 0, 240),
(240, 0, 0)]
done = False
clock = pygame.time.Clock()
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
done = True
elif event.key == pygame.K_a:
| tetrion.enqueue_event(Event(key=Key.LEFT, type=EventType.PRESSED, frame=frame)) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Birch-san/natten-fwd-ad
# Path: src/natten_block.py
class NattenBlock(Module):
def __init__(self, d_model: int, d_head: int, kernel_size: int):
super().__init__()
self.d_head = d_head
self.n_heads = d_model // d_head
self.kernel_size = kernel_size
self.qkv_proj = Linear(d_model, d_model * 3, bias=False)
self.out_proj = Linear(d_model, d_model, bias=False)
def forward(self, x: FloatTensor) -> FloatTensor:
qkv = self.qkv_proj(x)
q, k, v = rearrange(qkv, "n h w (t nh e) -> t n nh h w e", t=3, e=self.d_head)
q = q / self.d_head**.5
qk = natten2dqk(q, k, self.kernel_size, 1)
a = qk.softmax(dim=-1)
x = natten2dav(a, v, self.kernel_size, 1)
x = rearrange(x, "n nh h w e -> n h w (nh e)")
x = self.out_proj(x)
return x
# Path: src/hood_attn_block.py
class NeighbourhoodAttnBlock(Module):
def __init__(self, d_model: int, d_head: int, kernel_size: int):
"""
Pure-PyTorch implementation of neighbourhood attention.
Uses global self-attention and a (very) complicated mask.
Consequently it (probably) supports:
- Mac
- PyTorch Forward-Mode Autodiff
- Nested tensors
"""
super().__init__()
self.d_head = d_head
self.n_heads = d_model // d_head
self.kernel_size = kernel_size
self.qkv_proj = Linear(d_model, d_model * 3, bias=False)
self.out_proj = Linear(d_model, d_model, bias=False)
def forward(self, x: FloatTensor) -> FloatTensor:
_, h, w, _ = x.shape
qkv = self.qkv_proj(x)
q, k, v = rearrange(qkv, "n h w (t nh e) -> t n nh (h w) e", t=3, e=self.d_head)
kernel_size=Dimensions(self.kernel_size, self.kernel_size)
canvas_size=Dimensions(h, w)
mask: BoolTensor = make_neighbourhood_mask(kernel_size, canvas_size, flatten_to_1d=True, device=x.device)
mask = mask.unsqueeze(0).unsqueeze(0)
x = scaled_dot_product_attention(q, k, v, attn_mask=mask)
x = rearrange(x, "n nh (h w) e -> n h w (nh e)", h=h, w=w, e=self.d_head)
x = self.out_proj(x)
return x
# Path: script/demo.py
import torch
import torch.autograd.forward_ad as fwAD
from torch import inference_mode, enable_grad
from torch.backends.cuda import sdp_kernel
from src.natten_block import NattenBlock
from src.hood_attn_block import NeighbourhoodAttnBlock
device=torch.device('cuda')
dtype=torch.bfloat16
seed=42
d_model=128
d_head=64
kernel_size=13
torch.manual_seed(seed)
| natten_block = NattenBlock(d_model, d_head=d_head, kernel_size=kernel_size).to(device=device, dtype=dtype) |
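
The script imports torch.autograd.forward_ad as fwAD (the repo name suggests forward-mode autodiff through the attention blocks); a minimal sketch of that PyTorch API, independent of the blocks above:

import torch
import torch.autograd.forward_ad as fwAD

primal = torch.randn(4, 4)
tangent = torch.randn(4, 4)  # direction for the Jacobian-vector product

with fwAD.dual_level():
    dual = fwAD.make_dual(primal, tangent)
    out = torch.sin(dual)
    _, tangent_out = fwAD.unpack_dual(out)

# tangent_out is the JVP of sin at primal: cos(primal) * tangent
print(torch.allclose(tangent_out, torch.cos(primal) * tangent))
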
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ysyBrenda/Transformer-For-Geochemical-Anomaly-Detection
# Path: transformer/Models.py
class Transformer(nn.Module):
''' A sequence to sequence model with attention mechanism. '''
def __init__(
self, src_pad_idx, trg_pad_idx,
d_word_vec=38, d_model=38, d_inner=2048,
n_layers=6, n_head=8, d_k=38, d_v=38, dropout=0.1, n_position=2000,
):
super().__init__()
self.src_pad_idx, self.trg_pad_idx = src_pad_idx, trg_pad_idx
self.scale_prj = False #True
self.d_model = d_model
self.encoder = Encoder(
n_position=n_position,
d_word_vec=d_word_vec, d_model=d_model, d_inner=d_inner,
n_layers=n_layers, n_head=n_head, d_k=d_k, d_v=d_v,
pad_idx=src_pad_idx, dropout=dropout)
self.decoder = Decoder(
n_position=n_position,
d_word_vec=d_word_vec, d_model=d_model, d_inner=d_inner,
n_layers=n_layers, n_head=n_head, d_k=d_k, d_v=d_v,
pad_idx=trg_pad_idx, dropout=dropout)
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
assert d_model == d_word_vec, \
'To facilitate the residual connections, \
the dimensions of all module outputs shall be the same.'
def forward(self, src_seq, trg_seq):
src_mask=get_pad_mask(src_seq[:,:,0], self.src_pad_idx)
trg_mask=trg_seq[:, :,0] #.unsqueeze(1)
trg_mask = get_pad_mask(trg_mask, self.trg_pad_idx) & get_subsequent_mask(trg_mask)
enc_output,enc_slf_attn_list = self.encoder(src_seq, src_mask,return_attns=True)
dec_output, dec_slf_attn_list, dec_enc_attn_list= self.decoder(trg_seq, trg_mask, enc_output, src_mask,return_attns=True)
seq_logit=dec_output
return seq_logit.view(-1, seq_logit.size(2)),enc_slf_attn_list,dec_enc_attn_list
# Path: transformer/Translator.py
class Translator(nn.Module):
''' Load a trained model and translate in beam search fashion. '''
def __init__(
self, model,src_pad_idx):
super(Translator, self).__init__()
self.src_pad_idx = src_pad_idx
self.model = model
self.model.eval()
def _model_decode(self, trg_seq, enc_output, src_mask):
trg_mask = get_subsequent_mask(trg_seq[:, :,0] )
dec_output, dec_slf_attn,dec_enc_attn = self.model.decoder(trg_seq, trg_mask, enc_output, src_mask,return_attns=True)
seq_logit=dec_output
return seq_logit.view(-1, seq_logit.size(2)),dec_enc_attn
def translate_sentence(self, src_seq,trg_seq):
src_pad_idx= self.src_pad_idx
with torch.no_grad():
if len(src_seq.size())==2:
src_seq=src_seq.unsqueeze(0)
trg_seq=trg_seq.unsqueeze(0)
src_mask = get_pad_mask(src_seq[:,:,0], src_pad_idx)
enc_output, *_ = self.model.encoder(src_seq, src_mask)
dec_output,dec_enc_attn = self._model_decode(trg_seq.unsqueeze(0), enc_output, src_mask)
return dec_output,dec_enc_attn
# Path: anomaly_detection.py
import torch
import argparse
import dill as pickle
import numpy as np
import calculate_anomalyscore
import torch.utils.data as Data
import time
from tqdm import tqdm
from transformer.Models import Transformer
from transformer.Translator import Translator
'''
geochemical anomaly detection
1. Reconstruct geochemical data with the trained model.
2. Then identify geochemical anomalies.
Author: ysyBrenda
'''
def load_model(opt, device):
checkpoint = torch.load(opt.model, map_location=device)
model_opt = checkpoint['settings']
| model = Transformer( |
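
load_model reads a checkpoint that carries both the training settings and the weights; the usual completion of such a loader looks like the sketch below. The 'model' key is an assumption, since checkpoints store weights under repo-specific keys.

import torch

def finish_loading(model, checkpoint, device):
    # Restore trained weights and prepare the model for inference.
    model.load_state_dict(checkpoint['model'])  # 'model' key is hypothetical
    model.to(device)
    model.eval()
    return model
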
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: camenduru/MotionCtrl-hf
# Path: lvdm/basics.py
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
# Path: lvdm/basics.py
def normalization(channels, num_groups=32):
"""
Make a standard normalization layer.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNormSpecific(num_groups, channels)
# Path: lvdm/basics.py
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
# Path: lvdm/common.py
def checkpoint(func, inputs, params, flag):
"""
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.
:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
"""
if flag:
try:
return ckpt(func, *inputs)
except:
args = tuple(inputs) + tuple(params)
return CheckpointFunction.apply(func, len(inputs), *args)
else:
return func(*inputs)
# Path: lvdm/common.py
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
# Path: lvdm/common.py
def exists(val):
return val is not None
# Path: lvdm/common.py
def init_(tensor):
dim = tensor.shape[-1]
std = 1 / math.sqrt(dim)
tensor.uniform_(-std, std)
return tensor
# Path: lvdm/common.py
def max_neg_value(t):
return -torch.finfo(t.dtype).max
# Path: lvdm/common.py
def uniq(arr):
return{el: True for el in arr}.keys()
# Path: lvdm/modules/attention.py
import math
import torch
import torch.nn.functional as F
import xformers
import xformers.ops
from functools import partial
from inspect import isfunction
from einops import rearrange, repeat
from torch import einsum, nn
from lvdm.basics import conv_nd, normalization, zero_module
from lvdm.common import checkpoint, default, exists, init_, max_neg_value, uniq
try:
XFORMERS_IS_AVAILBLE = True
except:
XFORMERS_IS_AVAILBLE = False
class RelativePosition(nn.Module):
""" https://github.com/evelinehong/Transformer_Relative_Position_PyTorch/blob/master/relative_position.py """
def __init__(self, num_units, max_relative_position):
super().__init__()
self.num_units = num_units
self.max_relative_position = max_relative_position
self.embeddings_table = nn.Parameter(torch.Tensor(max_relative_position * 2 + 1, num_units))
nn.init.xavier_uniform_(self.embeddings_table)
def forward(self, length_q, length_k):
device = self.embeddings_table.device
range_vec_q = torch.arange(length_q, device=device)
range_vec_k = torch.arange(length_k, device=device)
distance_mat = range_vec_k[None, :] - range_vec_q[:, None]
distance_mat_clipped = torch.clamp(distance_mat, -self.max_relative_position, self.max_relative_position)
final_mat = distance_mat_clipped + self.max_relative_position
# final_mat = th.LongTensor(final_mat).to(self.embeddings_table.device)
# final_mat = th.tensor(final_mat, device=self.embeddings_table.device, dtype=torch.long)
final_mat = final_mat.long()
embeddings = self.embeddings_table[final_mat]
return embeddings
class CrossAttention(nn.Module):
def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.,
relative_position=False, temporal_length=None):
super().__init__()
inner_dim = dim_head * heads
| context_dim = default(context_dim, query_dim) |
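
The exists/default helpers imported above implement the usual optional-argument fallback, so a missing context_dim turns cross-attention into self-attention. Copied verbatim so the snippet stands alone:

from inspect import isfunction

def exists(val):
    return val is not None

def default(val, d):
    if exists(val):
        return val
    return d() if isfunction(d) else d

print(default(None, 320))         # 320: no context given, fall back to query_dim
print(default(768, 320))          # 768: an explicit context_dim wins
print(default(None, lambda: 64))  # 64: function defaults are evaluated lazily
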
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: vita-epfl/social-transmotion
# Path: dataset_jrdb.py
def batch_process_coords(coords, masks, padding_mask, config, modality_selection='traj+2dbox', training=False, multiperson=True):
joints = coords.to(config["DEVICE"])
masks = masks.to(config["DEVICE"])
in_F = config["TRAIN"]["input_track_size"]
in_joints_pelvis = joints[:,:, (in_F-1):in_F, 0:1, :].clone()
in_joints_pelvis_last = joints[:,:, (in_F-2):(in_F-1), 0:1, :].clone()
joints[:,:,:,0] = joints[:,:,:,0] - joints[:,0:1, (in_F-1):in_F, 0]
joints[:,:,:,1:] = (joints[:,:,:,1:] - joints[:,:,(in_F-1):in_F,1:])*0.25 #rescale for BB
B, N, F, J, K = joints.shape
if not training:
if modality_selection=='traj':
joints[:,:,:,1:]=0
elif modality_selection=='traj+2dbox':
pass
else:
print('modality error')
exit()
else:
# augment JRDB traj
joints[:,:,:,0,:3] = getRandomRotatePoseTransform(config)(joints[:,:,:,0,:3])
joints = joints.transpose(1, 2).reshape(B, F, N*J, K)
in_joints_pelvis = in_joints_pelvis.reshape(B, 1, N, K)
in_joints_pelvis_last = in_joints_pelvis_last.reshape(B, 1, N, K)
masks = masks.transpose(1, 2).reshape(B, F, N*J)
in_F, out_F = config["TRAIN"]["input_track_size"], config["TRAIN"]["output_track_size"]
in_joints = joints[:,:in_F].float()
out_joints = joints[:,in_F:in_F+out_F].float()
in_masks = masks[:,:in_F].float()
out_masks = masks[:,in_F:in_F+out_F].float()
return in_joints, in_masks, out_joints, out_masks, padding_mask.float()
# Path: dataset_jrdb.py
def create_dataset(dataset_name, logger, **args):
logger.info("Loading dataset " + dataset_name)
if dataset_name == 'jta_all_visual_cues':
dataset = JtaAllVisualCuesDataset(**args)
elif dataset_name == 'jrdb_2dbox':
dataset = Jrdb2dboxDataset(**args)
else:
raise ValueError(f"Dataset with name '{dataset_name}' not found.")
return dataset
# Path: dataset_jrdb.py
def collate_batch(batch):
joints_list = []
masks_list = []
num_people_list = []
for joints, masks in batch:
joints_list.append(joints)
masks_list.append(masks)
num_people_list.append(torch.zeros(joints.shape[0]))
joints = pad_sequence(joints_list, batch_first=True)
masks = pad_sequence(masks_list, batch_first=True)
padding_mask = pad_sequence(num_people_list, batch_first=True, padding_value=1).bool()
return joints, masks, padding_mask
# Path: model_jrdb.py
def create_model(config, logger):
seq_len = config["MODEL"]["seq_len"]
token_num = config["MODEL"]["token_num"]
nhid=config["MODEL"]["dim_hidden"]
nhead=config["MODEL"]["num_heads"]
nlayers_local=config["MODEL"]["num_layers_local"]
nlayers_global=config["MODEL"]["num_layers_global"]
dim_feedforward=config["MODEL"]["dim_feedforward"]
if config["MODEL"]["type"] == "transmotion":
logger.info("Creating bert model.")
model = TransMotion(tok_dim=seq_len,
nhid=nhid,
nhead=nhead,
dim_feedfwd=dim_feedforward,
nlayers_local=nlayers_local,
nlayers_global=nlayers_global,
output_scale=config["MODEL"]["output_scale"],
obs_and_pred=config["TRAIN"]["input_track_size"] + config["TRAIN"]["output_track_size"],
num_tokens=token_num,
device=config["DEVICE"]
).to(config["DEVICE"]).float()
else:
raise ValueError(f"Model type '{config['MODEL']['type']}' not found")
return model
# Path: utils/utils.py
def create_logger(logdir):
head = '%(asctime)-15s %(message)s'
if logdir != '':
log_file = os.path.join(logdir, 'log.txt')
logging.basicConfig(filename=log_file, format=head)
# output to console as well
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
else:
logging.basicConfig(format=head)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
return logger
# Path: evaluate_jrdb.py
import argparse
import torch
import random
import numpy as np
from progress.bar import Bar
from torch.utils.data import DataLoader
from dataset_jrdb import batch_process_coords, create_dataset, collate_batch
from model_jrdb import create_model
from utils.utils import create_logger
def inference(model, config, input_joints, padding_mask, out_len=14):
model.eval()
with torch.no_grad():
pred_joints = model(input_joints, padding_mask)
output_joints = pred_joints[:,-out_len:]
return output_joints
def evaluate_ade_fde(model, modality_selection, dataloader, bs, config, logger, return_all=False, bar_prefix="", per_joint=False, show_avg=False):
in_F, out_F = config['TRAIN']['input_track_size'], config['TRAIN']['output_track_size']
bar = Bar(f"EVAL ADE_FDE", fill="#", max=len(dataloader))
batch_size = bs
batch_id = 0
ade = 0
fde = 0
ade_batch = 0
fde_batch = 0
for i, batch in enumerate(dataloader):
joints, masks, padding_mask = batch
padding_mask = padding_mask.to(config["DEVICE"])
| in_joints, in_masks, out_joints, out_masks, padding_mask = batch_process_coords(joints, masks, padding_mask, config, modality_selection) |
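
The loop above accumulates ADE and FDE. Under the standard trajectory-forecasting definitions (ADE is the mean L2 error over the predicted frames, FDE the L2 error at the final frame), a minimal sketch that omits this repo's masking and per-person reshaping:

import torch

def ade_fde(pred, gt):
    # pred, gt: (batch, frames, 2) trajectories
    dist = torch.linalg.norm(pred - gt, dim=-1)  # (batch, frames)
    return dist.mean().item(), dist[:, -1].mean().item()

pred = torch.zeros(2, 12, 2)
gt = torch.ones(2, 12, 2)
print(ade_fde(pred, gt))  # both approx 1.414, i.e. sqrt(2)
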
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: facebookresearch/ca_body
# Path: ca_body/nn/blocks.py
def tile2d(x, size: int):
"""Tile a given set of features into a convolutional map.
Args:
x: float tensor of shape [N, F]
size: int or a tuple
Returns:
a feature map [N, F, size[0], size[1]]
"""
# size = size if isinstance(size, tuple) else (size, size)
# NOTE: expecting only int here (!!!)
return x[:, :, np.newaxis, np.newaxis].expand(-1, -1, size, size)
# Path: ca_body/nn/blocks.py
def weights_initializer(lrelu_slope=0.2):
# pyre-ignore
def init_fn(m):
if isinstance(
m,
(
nn.Conv2d,
nn.Conv1d,
nn.ConvTranspose2d,
nn.Linear,
),
):
gain = nn.init.calculate_gain("leaky_relu", lrelu_slope)
nn.init.kaiming_uniform_(m.weight.data, a=gain)
if hasattr(m, "bias") and m.bias is not None:
nn.init.zeros_(m.bias.data)
else:
logger.debug(f"skipping initialization for {m}")
return init_fn
# Path: ca_body/nn/shadow.py
import logging
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import ca_body.nn.layers as la
from typing import Optional, Dict
from ca_body.nn.blocks import tile2d, weights_initializer
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# TODO: use shared utils here?
logger = logging.getLogger(__name__)
class ShadowUNet(nn.Module):
def __init__(
self,
uv_size,
ao_mean,
shadow_size,
lrelu_slope=0.2,
beta=1.0,
n_dims=64,
interp_mode="bilinear",
biases=True,
trainable_mean=False,
):
super().__init__()
# this is the size of the output
self.uv_size = uv_size
self.shadow_size = shadow_size
ao_mean = F.interpolate(
th.as_tensor(ao_mean)[np.newaxis],
size=(self.shadow_size, self.shadow_size),
)[0]
if not trainable_mean:
# TODO:
self.register_buffer("ao_mean", ao_mean)
else:
self.register_parameter("ao_mean", th.nn.Parameter(ao_mean))
self.depth = 3
self.lrelu_slope = lrelu_slope
self.interp_mode = interp_mode
self.align_corners = None
if interp_mode == "bilinear":
self.align_corners = False
# the base number of dimensions for the shadow maps
n_dims = n_dims
# TODO: generate this?
self.n_enc_dims = [
(1, n_dims),
(n_dims, n_dims),
(n_dims, n_dims),
(n_dims, n_dims),
]
self.sizes = [shadow_size // (2**i) for i in range(len(self.n_enc_dims))]
logger.debug(f"sizes: {self.sizes}")
self.enc_layers = nn.ModuleList()
for i, size in enumerate(self.sizes):
n_in, n_out = self.n_enc_dims[i]
logger.debug(f"EncoderLayers({i}): {n_in}, {n_out}, {size}")
self.enc_layers.append(
nn.Sequential(
la.Conv2dWNUB(
n_in,
n_out,
kernel_size=3,
height=size,
width=size,
stride=1,
padding=1,
),
nn.LeakyReLU(self.lrelu_slope, inplace=True),
)
)
self.n_dec_dims = [
(n_dims, n_dims),
(n_dims * 2, n_dims),
(n_dims * 2, n_dims),
(n_dims * 2, n_dims),
]
self.dec_layers = nn.ModuleList()
for i in range(len(self.sizes)):
size = self.sizes[-i - 1]
n_in, n_out = self.n_dec_dims[i]
logger.debug(f"DecoderLayer({i}): {n_in}, {n_out}, {size}")
self.dec_layers.append(
nn.Sequential(
la.Conv2dWNUB(
n_in,
n_out,
kernel_size=3,
height=size,
width=size,
stride=1,
padding=1,
),
nn.LeakyReLU(self.lrelu_slope, inplace=True),
)
)
| self.apply(weights_initializer(self.lrelu_slope)) |
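
The predicted line relies on nn.Module.apply, which walks the module tree and calls the given function on every submodule; that is how weights_initializer reaches each convolution. A tiny standalone illustration:

import torch.nn as nn

def init_fn(m):
    if isinstance(m, nn.Linear):
        nn.init.zeros_(m.bias)  # zero only the Linear biases

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
model.apply(init_fn)  # visits every submodule recursively, then the root
print(model[0].bias.abs().sum().item())  # 0.0
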
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: 0x00wolf/hkrsAI
# Path: src/pathfinder.py
class PathFinder:
"""Class that returns an object with necessary paths for runtime operations"""
def __init__(self, cwd: str):
self.cwd = cwd
self.config = f'{self.cwd}/config.json'
self.logs = f'{self.cwd}/logs'
self.prompts = f'{self.cwd}/prompts'
self._first_runtime()
self._prompts_dir_exists()
@staticmethod
def _get_cwd():
"""Fetch the current working directory"""
abs_path = os.path.abspath(__file__)
cwd = os.path.dirname(abs_path)
return cwd
def _first_runtime(self):
"""Initialize the config.json and logs directory if not present at runtime."""
self._init_cfg_json()
self._init_logs_dir()
def _prompts_dir_exists(self):
"""Check to see if the prompts directory is present, or print an error and exit."""
if not os.path.exists(self.prompts):
print('[*] error: prompts directory is missing')
sys.exit()
def _init_cfg_json(self):
"""Generate the config.json file."""
if not os.path.exists(self.config):
self._dump(CONFIG_INIT, self.config)
def _init_logs_dir(self):
"""Generate the logs directory"""
if not os.path.exists(self.logs):
os.makedirs(self.logs)
@staticmethod
def _dump(json_dict, json_file):
"""Dumps a JSON object to a file"""
with open(json_file, 'w') as f:
json.dump(json_dict, f, indent=6)
# Path: src/conversation.py
class Conversation:
messages: list[dict] = dataclasses.field(default_factory=list)
query: str = ''
reply: str = ''
response: dict = dataclasses.field(default_factory=dict)
tokens: int = 0
def start(self, system_prompt: str):
self.messages = [{"role": "system", "content": system_prompt}]
print()
return Conversation(messages=self.messages)
def speak(self, content: str):
self.messages.append({"role": "user", "content": content})
return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)
def think(self, thought):
if self.query == '':
self.query = thought
else:
self.query = f'{self.query}\n{thought}'
return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)
def listen(self, gpt: GPT):
"""Function to perform GPT chat completions via the API"""
self.response = gpt.client.chat.completions.create(
model=gpt.model,
messages=self.messages,
temperature=gpt.temperature,
top_p=gpt.top_p,
n=gpt.n,
max_tokens=gpt.max_tokens,
frequency_penalty=gpt.frequency_penalty,
presence_penalty=gpt.presence_penalty,
)
self.reply = self.response.choices[0].message.content
self.tokens = self.response.usage.total_tokens
print(f"\n{self.reply}\n")
self.messages.append({"role": "assistant", "content": self.reply})
return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)
def breath(self):
return Conversation(messages=self.messages, query='', reply=self.reply, response=self.response)
@staticmethod
def greet():
return Conversation(messages=[], query='', reply='', response=None)
# Path: src/logger.py
import os
import re
import json
from typing import Type
from src.pathfinder import PathFinder
from src.conversation import Conversation
class Logger:
def __init__(self, paths: PathFinder, log_level: int, log_format: str):
"""Logs conversations and saves data at the user's request"""
self.level: int = log_level
self.format: str = log_format
        self.paths: PathFinder = paths
self.number: int = 0
self.file: str = ''
self.savefile: str = ''
self.save_number: int = 0
self.new_log()
@property
def level(self):
return self._level
@level.setter
def level(self, new_value: int):
if 1 != new_value != 2:
raise TypeError
else:
self._level = new_value
@property
def format(self):
return self._format
@format.setter
def format(self, new_value: str):
if new_value == 'txt' or new_value == 'json':
self._format = new_value
else:
            raise TypeError
def new_log(self):
self.number = self._next_number()
self.file = self._new_file()
def _next_number(self):
"""Fetch the next log number from config.json and updates it"""
config_data = self._load(self.paths.config)
self.number = log_num = config_data['log_number']
config_data['log_number'] = self.number + 1
self._dump(config_data, self.paths.config)
return self.number
def _new_file(self):
"""Generates a new logfile relative the current log number"""
while True: # to prevent inadvertently overwriting logs if the value is changed in config.json
self.file = f'{self.paths.logs}/log{self.number}.{self.format}'
try:
with open(self.file, 'x'):
print(f'[*] logfile generated ~ {self.file}')
return self.file
except FileExistsError:
self.number += 1
| def log(self, conversation: Conversation): |
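
_new_file above depends on open(path, 'x'): exclusive-creation mode creates the file only if it does not already exist and raises FileExistsError otherwise, which is what lets the loop advance to the next log number. A standalone demonstration:

import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), "log0.txt")
with open(path, "x"):       # first exclusive creation succeeds
    pass
try:
    with open(path, "x"):   # a second attempt on the same path fails
        pass
except FileExistsError:
    print("exists, bump the log number and retry")
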
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ccurme/chesster
# Path: chesster/app/utils.py
def display_board(board, player_side: chess.Color) -> None:
"""Display board."""
board_size = 360
if player_side == chess.WHITE:
flipped = False
else:
flipped = True
if board.move_stack:
last_move = board.move_stack[-1]
else:
last_move = None
return chess.svg.board(board, flipped=flipped, size=board_size, lastmove=last_move)
# Path: chesster/app/utils.py
def get_engine_score(board: chess.Board, player_side: chess.Color) -> int:
"""Get board score in centipawns."""
engine = get_stockfish_engine()
analysis = engine.analyse(board, chess.engine.Limit(time=0.1))
engine.quit()
score = analysis["score"]
if player_side == chess.WHITE:
return score.white().score()
else:
return score.black().score()
# Path: chesster/app/utils.py
def serialize_board_state_with_last_move(
board: chess.Board, player_side: chess.Color
) -> str:
"""Make message capturing board state."""
board_state_str = f"""
Player is playing as {serialize_player_side(player_side)}.
Current board state:
{serialize_board_state(board, player_side)}
"""
if board.move_stack:
last_move = board.pop()
last_move_san = board.san(last_move)
board.push(last_move)
if board.turn == player_side:
last_to_move = "Opponent"
else:
last_to_move = "Player"
previous_move_str = f"""{last_to_move} last move:
{last_move_san}
"""
else:
previous_move_str = "No moves yet."
return _clean_up_prompt(
f"""
{board_state_str}
{previous_move_str}
"""
).strip()
# Path: chesster/app/board_manager.py
import os
import urllib
import chess
from typing import Iterator
from fastapi import WebSocket, WebSocketDisconnect
from langserve import RemoteRunnable
from chesster.app.utils import (
display_board,
get_engine_score,
serialize_board_state_with_last_move,
)
LANGSERVE_HOST = os.getenv("LANGSERVE_HOST", "localhost")
LANGSERVE_SECRET = os.getenv("LANGSERVE_SECRET", "secret")
CHAT_HISTORY_LENGTH = 50 # Number of most recent (human, ai) exchanges to retain.
class BoardManager:
def __init__(self):
self.active_websockets: list[WebSocket] = []
self.last_updated_image = None
self.board = chess.Board()
self.player_side = chess.WHITE
self.interesting_move_iterator = None
self.chat_history = []
self.remote_runnable = RemoteRunnable(
f"http://{LANGSERVE_HOST}:8001/chesster", headers={"x-token": LANGSERVE_SECRET}
)
async def set_board(self, board: chess.Board) -> None:
"""Set board."""
self.board = board
await self.update_board(self.board)
async def set_player_side(self, player_side: chess.Color) -> None:
"""Set player side."""
self.player_side = player_side
await self.update_board(self.board)
async def set_interesting_move_iterator(self) -> None:
"""Calculate interesting moves in board's move stack."""
self.interesting_move_iterator = self._interesting_move_iterator()
async def make_move(self, move: chess.Move) -> None:
"""Parse move and update board."""
self.board.push(move)
await self.update_board(self.board)
async def _interesting_move_iterator(
self, centipawn_threshold: int = 100
) -> Iterator[chess.Board]:
"""Make iterator over interesting moves according to Chess engine."""
new_board = chess.Board()
centipawns = 0
for move in self.board.move_stack:
new_board.push(move)
new_centipawns = get_engine_score(new_board, self.player_side)
if new_centipawns is None:
continue
delta = new_centipawns - centipawns
if new_board.turn != self.player_side: # player just moved
if abs(delta) > centipawn_threshold:
await self.update_board(new_board)
yield {
| "board": serialize_board_state_with_last_move( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zkarpinski/codeinsight-sdk-python
# Path: codeinsight_sdk/client.py
class CodeInsightClient:
def __init__(self,
base_url: str,
api_token: str,
timeout: int = 60,
verify_ssl: bool = True
):
self.base_url = base_url
self.api_url = f"{base_url}/codeinsight/api"
self.__api_token = api_token
self.__api_headers = {
'Content-Type': 'application/json',
"Authorization": "Bearer %s" % self.__api_token,
"User-Agent": "codeinsight_sdk_python",
}
self.__timeout = timeout
self.__verify_ssl = verify_ssl
def request(self, method, url_part: str, params: dict = None, body: any = None ):
url = f"{self.api_url}/{url_part}"
# Iterate over params and remove any that are None (Empty)
if(params):
for k, v in list(params.items()):
if v is None:
del params[k]
response = requests.request(method, url,
headers=self.__api_headers, params=params, json=body,
timeout=self.__timeout, verify=self.__verify_ssl)
if not response.ok:
logger.error(f"Error: {response.status_code} - {response.reason}", exc_info=True)
logger.error(response.text)
raise CodeInsightError(response)
return response
@property
def projects(self) -> ProjectHandler:
return ProjectHandler(self)
@property
def reports(self) -> ReportHandler:
return ReportHandler(self)
# Coming soon...?
def inventories(self):
raise NotImplementedError("Inventories are not yet implemented")
def vulnerabilites(self):
raise NotImplementedError
def users(self):
raise NotImplementedError
def licenses(self):
raise NotImplementedError
def tasks(self):
raise NotImplementedError
def rules(self):
raise NotImplementedError
def files(self):
raise NotImplementedError
def folders(self):
raise NotImplementedError
def jobs(self):
raise NotImplementedError
def components(self):
raise NotImplementedError
# Path: codeinsight_sdk/exceptions.py
class CodeInsightError(GenericError):
"""Error class for code insight API errors."""
def __init__(self, response: requests.Response):
try:
resp = response.json()
self.code = response.status_code
self.message = resp['Error: ']
self.arguments = resp['Arguments: ']
self.error = resp['Key: ']
self.add_note(f"Arguments: {self.arguments}")
super().__init__("Error: %s - %s" % (self.code, self.message))
except KeyError:
raise ValueError(f"Error parsing response: {resp}")
except json.decoder.JSONDecodeError:
raise ValueError(f"Error decoding response: {resp}")
# Path: tests/test_client.py
import pytest
import logging
import requests_mock
from codeinsight_sdk import CodeInsightClient
from codeinsight_sdk.exceptions import CodeInsightError
logger = logging.getLogger(__name__)
## CHANGE ME ##
TEST_URL = "https://api.revenera.com"
TEST_API_TOKEN = "your_api_token"
class TestCodeInsightClient:
@pytest.fixture
def client(self):
return CodeInsightClient(TEST_URL, TEST_API_TOKEN)
def test_client(self, client):
assert client.base_url == TEST_URL
def test_endpoint_not_found(self, client):
with requests_mock.Mocker() as m:
m.get(f"{TEST_URL}/codeinsight/api/projects", status_code=404)
with pytest.raises(Exception):
client.projects.all()
class TestProjectEndpoints:
@pytest.fixture
def client(self):
return CodeInsightClient(TEST_URL, TEST_API_TOKEN)
def test_create_project(self, client):
project_name = "Test"
with requests_mock.Mocker() as m:
m.post(f"{TEST_URL}/codeinsight/api/projects", text='{"data": {"id":1}}')
project_id = client.projects.create(project_name)
assert project_id == 1
def test_get_all_projects(self, client):
with requests_mock.Mocker() as m:
m.get(f"{TEST_URL}/codeinsight/api/projects", text='{"data": [{"id":1, "name":"Test"}, {"id":2, "name":"Test 2"}]}')
projects = client.projects.all()
assert len(projects) > 0
def test_get_project_id(self, client):
project_name = "Test"
with requests_mock.Mocker() as m:
m.get(f"{TEST_URL}/codeinsight/api/project/id", text='{ "Content: ": 1 }') # Yes, the key is called 'Content: ' ...
project_id = client.projects.get_id(project_name)
assert project_id == 1
def test_get_project_id_invalid(self,client):
project_name = "Invalid_Project"
fake_response_json = """{ "Arguments: " : ["",""],
"Key: ": " InvalidProjectNameParm",
"Error: ": "The project name entered was not found" }
"""
with requests_mock.Mocker() as m:
# Note, the key names end with a colon and space '...: '
m.get(f"{TEST_URL}/codeinsight/api/project/id", text=fake_response_json, status_code=400)
| with pytest.raises(CodeInsightError): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: chebupelka8/Engine
# Path: scripts/math.py
class Vec2:
def __init__(self, x: int | float, y: int | float) -> None:
self.__verify(x, y)
self.__x = x
self.__y = y
@staticmethod
def __verify(x, y) -> None:
match x, y:
case x, y if all(map(lambda a: isinstance(a, (int, float)), [x, y])):
...
case _:
raise ValueError("Arguments 'x' and 'y' should be 'int' or 'float'")
@property
def x(self) -> int | float:
return self.__x
@x.setter
def x(self, __value: int | float) -> None:
self.__x = __value
@property
def y(self) -> int | float:
return self.__y
@y.setter
def y(self, __value: int | float) -> None:
self.__y = __value
@property
def xy(self) -> list:
return [self.__x, self.__y]
def __repr__(self) -> str:
return f"Vec2(x={self.__x}, y={self.__y})"
def __getitem__(self, __index) -> int | float:
return [self.__x, self.__y][__index]
def __setitem__(self, __index, __value) -> None:
res = [self.__x, self.__y]
res[__index] = __value
self.__verify(*res)
self.__x, self.__y = res
def __abs__(self):
return Vec2(abs(self.__x), abs(self.__y))
def __add__(self, __other):
if not isinstance(__other, Vec2): raise TypeError("Argument should be 'Vec2'")
return Vec2(self.__x + __other.x, self.__y + __other.y)
def __mul__(self, __other):
if not isinstance(__other, Vec2): raise TypeError("Argument should be 'Vec2'")
return Vec2(self.__x * __other.x, self.__y * __other.y)
# Path: scripts/image.py
class Image:
def __init__(self, __arg: str | pygame.Surface, should_convert: bool = True) -> None:
self.__image = self.__load(__arg)
if should_convert: self.__image = self.__image.convert_alpha()
@classmethod
def __load(cls, __arg: str | pygame.Surface) -> pygame.Surface:
cls.__verify(__arg)
return pygame.image.load(__arg) if isinstance(__arg, str) else __arg
@staticmethod
def __verify(__arg: Any) -> None:
if not type(__arg) in (str, pygame.Surface): raise TypeError(f"Argument should be a string or a 'Surface', not {type(__arg)}")
@property
def image(self) -> pygame.Surface:
return self.__image
@property
def size(self) -> Vec2:
return Vec2(*self.__image.get_size())
@image.setter
def image(self, image: pygame.Surface) -> None:
self.__image = image
def __repr__(self) -> str:
return f"Image(size={self.image.get_size()}, alpha={self.image.get_alpha()})"
# Path: scripts/loop.py
import pygame, sys
from pygame.locals import *
from .math import Vec2
from .image import Image
class WindowLoop:
def __init__(self, __size: Vec2, fps: int = 144) -> None:
pygame.init()
self.__display = pygame.display.set_mode((__size.x, __size.y))
pygame.display.set_caption("Engine: v0.1")
| pygame.display.set_icon(Image("Engine/assets/icon.png").image)
|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: lxbme/TSPLifesaver
# Path: TSPLifesaver/abc/abc.py
class AbstractPoint(ABC, MutableSequence):
def __delitem__(self, key): ...
def insert(self, index, value): ...
@abstractmethod
def __init__(self,pos):
"""
        Init the Point.
        :param pos: the initial coordinates of the point
"""
@property
def name(self):
"""
The name of the Point.
:return: Any
"""
return None
@abstractmethod
def distance_to(self, other: MutableSequence):
"""
Calculate the distance between this Point and another.
        :param other: the other point
        :return: The distance between the two points.
"""
# Path: TSPLifesaver/abc/abc.py
class AbstractRoute(ABC, MutableSequence):
@abstractmethod
def swap(self, index_1: int, index_2: int) -> None:
"""
        This method should swap the two elements at the given indexes.
"""
@abstractmethod
def distance(self):
"""
This method should return the total length of the route.
        :return: The total length of the route.
"""
# Path: TSPLifesaver/structure.py
class BasicRoute(AbstractRoute):
def __init__(self, points: MutableSequence[AbstractPoint], name="BasicRoute"):
self.points = points
self.name = name
def __iter__(self):
return iter(self.points)
def __getitem__(self, item):
return self.points[item]
def __setitem__(self, item, value):
self.points[item] = value
def __delitem__(self, item):
del self.points[item]
def __len__(self):
return len(self.points)
def __str__(self):
string = self.name + "(\n"
for point in self.points:
string += f"{point.name}: {[point[i] for i in range(len(point))]}\n"
string += ")"
return string
def insert(self, index, value):
self.points.insert(index, value)
def distance(self):
"""
Calculates the total distance.
:return:
"""
return sum([pre.distance_to(after) for pre, after in zip(self[:-1], self[1:])])
def swap(self, index_1: int, index_2: int) -> None:
"""
Swaps two points
:param index_1:
:param index_2:
:return:
"""
self[index_1], self[index_2] = self[index_2], self[index_1]
def append(self, value: AbstractPoint):
self.points.append(value)
# Path: TSPLifesaver/structure.py
class PointWithEuclideanDistance(BasicPoint):
def __init__(self, pos: MutableSequence, name: Any = None):
super().__init__(pos, name)
# Path: TSPLifesaver/optimizer.py
class SimulatedAnnealing(AbstractOptimizer):
def __init__(self, initial_route: AbstractRoute, temperature, cooling_rate, min_temperature):
"""
        :param initial_route: the starting route to optimize
:param temperature:
:param cooling_rate:
:param min_temperature:
"""
self.current_route = deepcopy(initial_route)
self.best_route = deepcopy(initial_route)
self.temperature = temperature
self.cooling_rate = cooling_rate
self.min_temperature = min_temperature
def optimize(self):
while self.temperature > self.min_temperature:
new_route = deepcopy(self.current_route)
# exchange randomly
i, j = random.sample(range(len(new_route)), 2)
new_route.swap(i, j)
# calc cost
current_cost = self.current_route.distance()
new_cost = new_route.distance()
cost_difference = current_cost - new_cost
# accepting the new result?
if cost_difference > 0 or math.exp(cost_difference / self.temperature) > random.random():
self.current_route = new_route
if new_cost < self.best_route.distance():
self.best_route = new_route
# decrease the temperature
self.temperature *= (1 - self.cooling_rate)
return self.best_route
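
The acceptance rule in optimize always keeps improving moves (cost_difference > 0) and accepts worsening ones with probability exp(cost_difference / temperature); a quick numeric check of how that probability shrinks as the system cools:

import math

for T in (1000, 100, 10):
    p = math.exp(-50 / T)  # candidate route that is 50 units longer
    print(f"T={T}: accept with p={p:.3f}")
# T=1000: p=0.951, T=100: p=0.607, T=10: p=0.007
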
# Path: TSPLifesaver/tools.py
from typing import Iterable, MutableSequence, Type
from random import shuffle
from copy import deepcopy
from TSPLifesaver.abc import AbstractRoute, AbstractPoint
from TSPLifesaver.structure import BasicRoute, PointWithEuclideanDistance
from TSPLifesaver.optimizer import SimulatedAnnealing
def route_from_sequence(sequence: Iterable[MutableSequence], route: AbstractRoute = BasicRoute([]),
point_class: Type[AbstractPoint] = PointWithEuclideanDistance,
name_offset: int = 1, ) -> AbstractRoute:
"""
:param route: Instances of the AbstractRoute class or its subclasses, defaults to empty instance of BasicRoute
:param name_offset: Index of the name
:param sequence: Sequence containing coordinates
:param point_class: AbstractPoint or its subclasses ,defaults to PointWithEuclideanDistance
:return: a new route
"""
index = name_offset
for pos in sequence:
try:
point = point_class(pos, name=f"{index}")
except:
point = point_class(pos)
route.append(point)
index += 1
return route
def simulated_annealing(route: AbstractRoute, epoch: int = 100, temperature: float = 10000,
cooling_rate: float = 0.03, min_temperature: float = 1,
log: bool = False) -> AbstractRoute:
"""
:param route: Instances of the AbstractRoute class or its subclasses
:param epoch: Number of epochs to simulate, defaults to 100
:param temperature: Temperature of the annealing, defaults to 10000
:param cooling_rate: Cooling rate of the annealing, defaults to 0.03
:param min_temperature: Minimum temperature of the annealing, defaults to 1
:param log: Whether to print the log of the annealing, defaults to False
:return: optimized route
"""
if len(route):
best_route = deepcopy(route)
for i in range(epoch):
if log:
print(f"Running epoch {i} of {epoch}")
shuffle(route)
| opt = SimulatedAnnealing(route, temperature=temperature, |
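
A hedged end-to-end sketch of how the two helpers in this file compose, if appended to this tools.py (it assumes PointWithEuclideanDistance inherits a Euclidean distance_to from BasicPoint, which is not shown in this excerpt):

coords = [[0, 0], [3, 4], [6, 8], [0, 8]]
route = route_from_sequence(coords)
best = simulated_annealing(route, epoch=5)
print(best.distance())
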