repo_name (string, 7-71 chars) | file_path (string, 5-118 chars) | context (list) | import_statement (string, 45-12.5k chars) | token_num (int64, 641-99.4k) | cropped_code (string, 44-17k chars) | all_code (string, 43-754k chars) | next_line (string, 2-330 chars) | gold_snippet_index (int64, 0-68) | created_at (string, 25 chars) | level (string, 9 classes) |
---|---|---|---|---|---|---|---|---|---|---|
DLYuanGod/TinyGPT-V | minigpt4/processors/blip_processors.py | [
{
"identifier": "registry",
"path": "minigpt4/common/registry.py",
"snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):"
},
{
"identifier": "BaseProcessor",
"path": "minigpt4/processors/base_processor.py",
"snippet": "class BaseProcessor:\n def __init__(self):\n self.transform = lambda x: x\n return\n\n def __call__(self, item):\n return self.transform(item)\n\n @classmethod\n def from_config(cls, cfg=None):\n return cls()\n\n def build(self, **kwargs):\n cfg = OmegaConf.create(kwargs)\n\n return self.from_config(cfg)"
},
{
"identifier": "RandomAugment",
"path": "minigpt4/processors/randaugment.py",
"snippet": "class RandomAugment(object):\n def __init__(self, N=2, M=10, isPIL=False, augs=[]):\n self.N = N\n self.M = M\n self.isPIL = isPIL\n if augs:\n self.augs = augs\n else:\n self.augs = list(arg_dict.keys())\n\n def get_random_ops(self):\n sampled_ops = np.random.choice(self.augs, self.N)\n return [(op, 0.5, self.M) for op in sampled_ops]\n\n def __call__(self, img):\n if self.isPIL:\n img = np.array(img)\n ops = self.get_random_ops()\n for name, prob, level in ops:\n if np.random.random() > prob:\n continue\n args = arg_dict[name](level)\n img = func_dict[name](img, *args)\n return img"
}
] | import re
from minigpt4.common.registry import registry
from minigpt4.processors.base_processor import BaseProcessor
from minigpt4.processors.randaugment import RandomAugment
from omegaconf import OmegaConf
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode | 756 | """
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
| """
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
| class BlipImageBaseProcessor(BaseProcessor): | 1 | 2023-12-28 05:47:18+00:00 | 2k |
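The `Registry` entry in row 1's context lists only method signatures. As a rough illustration, a registration decorator of this shape typically works like the minimal sketch below (simplified and assumed, not the verbatim minigpt4 implementation; the processor name is hypothetical):

```python
class Registry:
    # name -> class mappings, one per component family
    mapping = {"processor_name_mapping": {}}

    @classmethod
    def register_processor(cls, name):
        def wrap(processor_cls):
            # store the decorated class under `name` for later lookup
            cls.mapping["processor_name_mapping"][name] = processor_cls
            return processor_cls
        return wrap

    @classmethod
    def get_processor_class(cls, name):
        return cls.mapping["processor_name_mapping"].get(name)

registry = Registry()

@registry.register_processor("blip_image_base")  # hypothetical name
class MyProcessor:
    pass

assert registry.get_processor_class("blip_image_base") is MyProcessor
```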
jianchang512/vocal-separate | start.py | [
{
"identifier": "cfg",
"path": "vocal/cfg.py",
"snippet": "LANG = \"en\" if locale.getdefaultlocale()[0].split('_')[0].lower() != 'zh' else \"zh\"\nROOT_DIR = os.getcwd()\nMODEL_DIR = os.path.join(ROOT_DIR, 'pretrained_models')\nSTATIC_DIR = os.path.join(ROOT_DIR, 'static')\nTMP_DIR = os.path.join(STATIC_DIR, 'tmp')\nFILES_DIR = os.path.join(STATIC_DIR, 'files')"
},
{
"identifier": "tool",
"path": "vocal/tool.py",
"snippet": "def runffmpeg(arg):\ndef checkupdate():\ndef openweb(web_address):"
},
{
"identifier": "ROOT_DIR",
"path": "vocal/cfg.py",
"snippet": "ROOT_DIR = os.getcwd()"
}
] | import logging
import threading
import sys
import os
import subprocess
from flask import Flask, request, render_template, jsonify, send_from_directory
from gevent.pywsgi import WSGIServer, WSGIHandler,LoggingLogAdapter
from logging.handlers import RotatingFileHandler
from vocal import cfg, tool
from vocal.cfg import ROOT_DIR
from spleeter.separator import Separator | 795 |
class CustomRequestHandler(WSGIHandler):
def log_request(self):
pass
# Disable Werkzeug's default log handlers
log = logging.getLogger('werkzeug')
log.handlers[:] = []
log.setLevel(logging.WARNING)
app = Flask(__name__, static_folder=os.path.join(ROOT_DIR, 'static'), static_url_path='/static',
template_folder=os.path.join(ROOT_DIR, 'templates'))
root_log = logging.getLogger() # Flask's root logger
root_log.handlers = []
root_log.setLevel(logging.WARNING)
# Configure logging
app.logger.setLevel(logging.WARNING) # set the log level to WARNING
# Create a RotatingFileHandler with the target file path and size limit
file_handler = RotatingFileHandler(os.path.join(ROOT_DIR, 'vocal.log'), maxBytes=1024 * 1024, backupCount=5)
# Define the log format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Set the file handler's level and format
file_handler.setLevel(logging.WARNING)
file_handler.setFormatter(formatter)
# Attach the file handler to the logger
app.logger.addHandler(file_handler)
@app.route('/static/<path:filename>')
def static_files(filename):
return send_from_directory(app.config['STATIC_FOLDER'], filename)
@app.route('/')
def index():
return render_template("index.html",cuda=cfg.cuda, language=cfg.LANG,root_dir=ROOT_DIR.replace('\\', '/'))
# Upload audio
@app.route('/upload', methods=['POST'])
def upload():
try:
# Get the uploaded file
audio_file = request.files['audio']
# if it is an mp4
noextname, ext = os.path.splitext(audio_file.filename)
ext = ext.lower()
# If it is a video, extract the audio first
wav_file = os.path.join(cfg.TMP_DIR, f'{noextname}.wav')
if os.path.exists(wav_file) and os.path.getsize(wav_file) > 0:
return jsonify({'code': 0, 'msg': cfg.transobj['lang1'], "data": os.path.basename(wav_file)})
msg=""
if ext in ['.mp4', '.mov', '.avi', '.mkv', '.mpeg', '.mp3', '.flac']:
video_file = os.path.join(cfg.TMP_DIR, f'{noextname}{ext}')
audio_file.save(video_file)
params = [
"-i",
video_file,
]
if ext not in ['.mp3', '.flac']:
params.append('-vn')
params.append(wav_file)
|
class CustomRequestHandler(WSGIHandler):
def log_request(self):
pass
# Disable Werkzeug's default log handlers
log = logging.getLogger('werkzeug')
log.handlers[:] = []
log.setLevel(logging.WARNING)
app = Flask(__name__, static_folder=os.path.join(ROOT_DIR, 'static'), static_url_path='/static',
template_folder=os.path.join(ROOT_DIR, 'templates'))
root_log = logging.getLogger() # Flask's root logger
root_log.handlers = []
root_log.setLevel(logging.WARNING)
# Configure logging
app.logger.setLevel(logging.WARNING) # set the log level to WARNING
# Create a RotatingFileHandler with the target file path and size limit
file_handler = RotatingFileHandler(os.path.join(ROOT_DIR, 'vocal.log'), maxBytes=1024 * 1024, backupCount=5)
# Define the log format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Set the file handler's level and format
file_handler.setLevel(logging.WARNING)
file_handler.setFormatter(formatter)
# Attach the file handler to the logger
app.logger.addHandler(file_handler)
@app.route('/static/<path:filename>')
def static_files(filename):
return send_from_directory(app.config['STATIC_FOLDER'], filename)
@app.route('/')
def index():
return render_template("index.html",cuda=cfg.cuda, language=cfg.LANG,root_dir=ROOT_DIR.replace('\\', '/'))
# Upload audio
@app.route('/upload', methods=['POST'])
def upload():
try:
# Get the uploaded file
audio_file = request.files['audio']
# if it is an mp4
noextname, ext = os.path.splitext(audio_file.filename)
ext = ext.lower()
# If it is a video, extract the audio first
wav_file = os.path.join(cfg.TMP_DIR, f'{noextname}.wav')
if os.path.exists(wav_file) and os.path.getsize(wav_file) > 0:
return jsonify({'code': 0, 'msg': cfg.transobj['lang1'], "data": os.path.basename(wav_file)})
msg=""
if ext in ['.mp4', '.mov', '.avi', '.mkv', '.mpeg', '.mp3', '.flac']:
video_file = os.path.join(cfg.TMP_DIR, f'{noextname}{ext}')
audio_file.save(video_file)
params = [
"-i",
video_file,
]
if ext not in ['.mp3', '.flac']:
params.append('-vn')
params.append(wav_file) | rs = tool.runffmpeg(params) | 1 | 2023-12-26 06:20:35+00:00 | 2k |
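The body of `tool.runffmpeg` is elided in row 2's context (`def runffmpeg(arg):`), so the gold next line `rs = tool.runffmpeg(params)` calls into unseen code. A plausible minimal implementation, assuming it shells out to the ffmpeg CLI via `subprocess`:

```python
import subprocess

def runffmpeg(arg):
    # Prepend the binary; -y overwrites existing outputs without prompting.
    cmd = ["ffmpeg", "-hide_banner", "-y"] + arg
    try:
        subprocess.run(cmd, check=True,
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return True
    except subprocess.CalledProcessError as e:
        # Surface ffmpeg's stderr so the caller can log the failure.
        print(e.stderr.decode("utf-8", errors="ignore"))
        return False
```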
ali-vilab/dreamtalk | core/networks/dynamic_fc_decoder.py | [
{
"identifier": "_get_activation_fn",
"path": "core/networks/transformer.py",
"snippet": "def _get_activation_fn(activation):\r\n \"\"\"Return an activation function given a string\"\"\"\r\n if activation == \"relu\":\r\n return F.relu\r\n if activation == \"gelu\":\r\n return F.gelu\r\n if activation == \"glu\":\r\n return F.glu\r\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")\r"
},
{
"identifier": "_get_clones",
"path": "core/networks/transformer.py",
"snippet": "def _get_clones(module, N):\r\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\r"
},
{
"identifier": "DynamicLinear",
"path": "core/networks/dynamic_linear.py",
"snippet": "class DynamicLinear(nn.Module):\n def __init__(self, in_planes, out_planes, cond_planes, bias=True, K=4, temperature=30, ratio=4, init_weight=True):\n super().__init__()\n\n self.dynamic_conv = DynamicConv(\n in_planes,\n out_planes,\n cond_planes,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n K=K,\n ratio=ratio,\n temperature=temperature,\n init_weight=init_weight,\n )\n\n def forward(self, x, cond):\n \"\"\"\n\n Args:\n x (_type_): (L, B, C_in)\n cond (_type_): (B, C_style)\n\n Returns:\n _type_: (L, B, C_out)\n \"\"\"\n x = x.permute(1, 2, 0).unsqueeze(-1)\n out = self.dynamic_conv(x, cond)\n # (B, C_out, L, 1)\n out = out.squeeze().permute(2, 0, 1)\n return out"
}
] | import torch.nn as nn
import torch
from core.networks.transformer import _get_activation_fn, _get_clones
from core.networks.dynamic_linear import DynamicLinear | 1,476 |
class DynamicFCDecoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
d_style,
dynamic_K,
dynamic_ratio,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
# self.linear1 = nn.Linear(d_model, dim_feedforward)
self.linear1 = DynamicLinear(d_model, dim_feedforward, d_style, K=dynamic_K, ratio=dynamic_ratio)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
# self.linear2 = DynamicLinear(dim_feedforward, d_model, d_style, K=dynamic_K, ratio=dynamic_ratio)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos):
return tensor if pos is None else tensor + pos
def forward_post(
self,
tgt,
memory,
style,
tgt_mask=None,
memory_mask=None,
tgt_key_padding_mask=None,
memory_key_padding_mask=None,
pos=None,
query_pos=None,
):
# q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(tgt, tgt, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(
query=tgt, key=memory, value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask
)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
# tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt, style))), style)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt, style))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
# def forward_pre(
# self,
# tgt,
# memory,
# tgt_mask=None,
# memory_mask=None,
# tgt_key_padding_mask=None,
# memory_key_padding_mask=None,
# pos=None,
# query_pos=None,
# ):
# tgt2 = self.norm1(tgt)
# # q = k = self.with_pos_embed(tgt2, query_pos)
# tgt2 = self.self_attn(tgt2, tgt2, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
# tgt = tgt + self.dropout1(tgt2)
# tgt2 = self.norm2(tgt)
# tgt2 = self.multihead_attn(
# query=tgt2, key=memory, value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask
# )[0]
# tgt = tgt + self.dropout2(tgt2)
# tgt2 = self.norm3(tgt)
# tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
# tgt = tgt + self.dropout3(tgt2)
# return tgt
def forward(
self,
tgt,
memory,
style,
tgt_mask=None,
memory_mask=None,
tgt_key_padding_mask=None,
memory_key_padding_mask=None,
pos=None,
query_pos=None,
):
if self.normalize_before:
raise NotImplementedError
# return self.forward_pre(
# tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos
# )
return self.forward_post(
tgt, memory, style, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos
)
class DynamicFCDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
|
class DynamicFCDecoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
d_style,
dynamic_K,
dynamic_ratio,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
# self.linear1 = nn.Linear(d_model, dim_feedforward)
self.linear1 = DynamicLinear(d_model, dim_feedforward, d_style, K=dynamic_K, ratio=dynamic_ratio)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
# self.linear2 = DynamicLinear(dim_feedforward, d_model, d_style, K=dynamic_K, ratio=dynamic_ratio)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos):
return tensor if pos is None else tensor + pos
def forward_post(
self,
tgt,
memory,
style,
tgt_mask=None,
memory_mask=None,
tgt_key_padding_mask=None,
memory_key_padding_mask=None,
pos=None,
query_pos=None,
):
# q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(tgt, tgt, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(
query=tgt, key=memory, value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask
)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
# tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt, style))), style)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt, style))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
# def forward_pre(
# self,
# tgt,
# memory,
# tgt_mask=None,
# memory_mask=None,
# tgt_key_padding_mask=None,
# memory_key_padding_mask=None,
# pos=None,
# query_pos=None,
# ):
# tgt2 = self.norm1(tgt)
# # q = k = self.with_pos_embed(tgt2, query_pos)
# tgt2 = self.self_attn(tgt2, tgt2, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
# tgt = tgt + self.dropout1(tgt2)
# tgt2 = self.norm2(tgt)
# tgt2 = self.multihead_attn(
# query=tgt2, key=memory, value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask
# )[0]
# tgt = tgt + self.dropout2(tgt2)
# tgt2 = self.norm3(tgt)
# tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
# tgt = tgt + self.dropout3(tgt2)
# return tgt
def forward(
self,
tgt,
memory,
style,
tgt_mask=None,
memory_mask=None,
tgt_key_padding_mask=None,
memory_key_padding_mask=None,
pos=None,
query_pos=None,
):
if self.normalize_before:
raise NotImplementedError
# return self.forward_pre(
# tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos
# )
return self.forward_post(
tgt, memory, style, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos
)
class DynamicFCDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__() | self.layers = _get_clones(decoder_layer, num_layers) | 1 | 2023-12-28 05:39:31+00:00 | 2k |
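Row 3 is cut immediately after `DynamicFCDecoder.__init__` opens, and the gold next line clones the decoder layer. A sketch of how such a stack usually continues, modeled on the stock DETR decoder with the extra `style` argument threaded through (assumed, not the verbatim dreamtalk code):

```python
import copy
import torch
import torch.nn as nn

def _get_clones(module, N):
    # identical to the helper shown in row 3's context
    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])

class DynamicFCDecoder(nn.Module):
    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)  # the gold next line
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(self, tgt, memory, style, **kwargs):
        output = tgt
        intermediate = []
        for layer in self.layers:
            output = layer(output, memory, style, **kwargs)
            if self.return_intermediate:
                intermediate.append(self.norm(output) if self.norm is not None else output)
        if self.return_intermediate:
            return torch.stack(intermediate)
        return self.norm(output) if self.norm is not None else output
```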
jiawei-ren/dreamgaussian4d | diffusers/src/diffusers/models/activations.py | [
{
"identifier": "USE_PEFT_BACKEND",
"path": "diffusers/src/diffusers/utils/constants.py",
"snippet": "USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version"
},
{
"identifier": "LoRACompatibleLinear",
"path": "diffusers/src/diffusers/models/lora.py",
"snippet": "class LoRACompatibleLinear(nn.Linear):\n \"\"\"\n A Linear layer that can be used with LoRA.\n \"\"\"\n\n def __init__(self, *args, lora_layer: Optional[LoRALinearLayer] = None, **kwargs):\n super().__init__(*args, **kwargs)\n self.lora_layer = lora_layer\n\n def set_lora_layer(self, lora_layer: Optional[LoRALinearLayer]):\n self.lora_layer = lora_layer\n\n def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False):\n if self.lora_layer is None:\n return\n\n dtype, device = self.weight.data.dtype, self.weight.data.device\n\n w_orig = self.weight.data.float()\n w_up = self.lora_layer.up.weight.data.float()\n w_down = self.lora_layer.down.weight.data.float()\n\n if self.lora_layer.network_alpha is not None:\n w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank\n\n fused_weight = w_orig + (lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])\n\n if safe_fusing and torch.isnan(fused_weight).any().item():\n raise ValueError(\n \"This LoRA weight seems to be broken. \"\n f\"Encountered NaN values when trying to fuse LoRA weights for {self}.\"\n \"LoRA weights will not be fused.\"\n )\n\n self.weight.data = fused_weight.to(device=device, dtype=dtype)\n\n # we can drop the lora layer now\n self.lora_layer = None\n\n # offload the up and down matrices to CPU to not blow the memory\n self.w_up = w_up.cpu()\n self.w_down = w_down.cpu()\n self._lora_scale = lora_scale\n\n def _unfuse_lora(self):\n if not (getattr(self, \"w_up\", None) is not None and getattr(self, \"w_down\", None) is not None):\n return\n\n fused_weight = self.weight.data\n dtype, device = fused_weight.dtype, fused_weight.device\n\n w_up = self.w_up.to(device=device).float()\n w_down = self.w_down.to(device).float()\n\n unfused_weight = fused_weight.float() - (self._lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])\n self.weight.data = unfused_weight.to(device=device, dtype=dtype)\n\n self.w_up = None\n self.w_down = None\n\n def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:\n if self.lora_layer is None:\n out = super().forward(hidden_states)\n return out\n else:\n out = super().forward(hidden_states) + (scale * self.lora_layer(hidden_states))\n return out"
}
] | import torch
import torch.nn.functional as F
from torch import nn
from ..utils import USE_PEFT_BACKEND
from .lora import LoRACompatibleLinear | 1,423 | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ACTIVATION_FUNCTIONS = {
"swish": nn.SiLU(),
"silu": nn.SiLU(),
"mish": nn.Mish(),
"gelu": nn.GELU(),
"relu": nn.ReLU(),
}
def get_activation(act_fn: str) -> nn.Module:
"""Helper function to get activation function from string.
Args:
act_fn (str): Name of activation function.
Returns:
nn.Module: Activation function.
"""
act_fn = act_fn.lower()
if act_fn in ACTIVATION_FUNCTIONS:
return ACTIVATION_FUNCTIONS[act_fn]
else:
raise ValueError(f"Unsupported activation function: {act_fn}")
class GELU(nn.Module):
r"""
GELU activation function with tanh approximation support with `approximate="tanh"`.
Parameters:
dim_in (`int`): The number of channels in the input.
dim_out (`int`): The number of channels in the output.
approximate (`str`, *optional*, defaults to `"none"`): If `"tanh"`, use tanh approximation.
"""
def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out)
self.approximate = approximate
def gelu(self, gate: torch.Tensor) -> torch.Tensor:
if gate.device.type != "mps":
return F.gelu(gate, approximate=self.approximate)
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)
def forward(self, hidden_states):
hidden_states = self.proj(hidden_states)
hidden_states = self.gelu(hidden_states)
return hidden_states
class GEGLU(nn.Module):
r"""
A [variant](https://arxiv.org/abs/2002.05202) of the gated linear unit activation function.
Parameters:
dim_in (`int`): The number of channels in the input.
dim_out (`int`): The number of channels in the output.
"""
def __init__(self, dim_in: int, dim_out: int):
super().__init__()
| # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ACTIVATION_FUNCTIONS = {
"swish": nn.SiLU(),
"silu": nn.SiLU(),
"mish": nn.Mish(),
"gelu": nn.GELU(),
"relu": nn.ReLU(),
}
def get_activation(act_fn: str) -> nn.Module:
"""Helper function to get activation function from string.
Args:
act_fn (str): Name of activation function.
Returns:
nn.Module: Activation function.
"""
act_fn = act_fn.lower()
if act_fn in ACTIVATION_FUNCTIONS:
return ACTIVATION_FUNCTIONS[act_fn]
else:
raise ValueError(f"Unsupported activation function: {act_fn}")
class GELU(nn.Module):
r"""
GELU activation function with tanh approximation support with `approximate="tanh"`.
Parameters:
dim_in (`int`): The number of channels in the input.
dim_out (`int`): The number of channels in the output.
approximate (`str`, *optional*, defaults to `"none"`): If `"tanh"`, use tanh approximation.
"""
def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out)
self.approximate = approximate
def gelu(self, gate: torch.Tensor) -> torch.Tensor:
if gate.device.type != "mps":
return F.gelu(gate, approximate=self.approximate)
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)
def forward(self, hidden_states):
hidden_states = self.proj(hidden_states)
hidden_states = self.gelu(hidden_states)
return hidden_states
class GEGLU(nn.Module):
r"""
A [variant](https://arxiv.org/abs/2002.05202) of the gated linear unit activation function.
Parameters:
dim_in (`int`): The number of channels in the input.
dim_out (`int`): The number of channels in the output.
"""
def __init__(self, dim_in: int, dim_out: int):
super().__init__() | linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear | 1 | 2023-12-28 08:17:40+00:00 | 2k |
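Row 4 stops inside `GEGLU.__init__`; the gold next line picks `LoRACompatibleLinear` over `nn.Linear` when the PEFT backend is absent. Setting the LoRA plumbing aside, the GEGLU mechanics reduce to the standalone sketch below (plain `nn.Linear` used for brevity):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class GEGLU(nn.Module):
    """Gated GELU: project to 2 * dim_out, split, gate one half by GELU of the other."""
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * F.gelu(gate)
```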
Meituan-AutoML/MobileVLM | mobilevlm/model/mobilevlm.py | [
{
"identifier": "build_vision_tower",
"path": "mobilevlm/model/vision_encoder.py",
"snippet": "def build_vision_tower(model_cfg, **kwargs):\n vision_tower = getattr(model_cfg, 'mm_vision_tower', getattr(model_cfg, 'vision_tower', None))\n is_absolute_path_exists = os.path.exists(vision_tower)\n if is_absolute_path_exists or vision_tower.startswith(\"openai\") or vision_tower.startswith(\"laion\"):\n vision_tower_type = getattr(model_cfg, 'vision_tower_type', None)\n if vision_tower_type == \"clip\":\n return CLIPVisionTower(vision_tower, args=model_cfg, **kwargs)\n raise ValueError(f'Unknown vision tower: {vision_tower}')"
},
{
"identifier": "build_vision_projector",
"path": "mobilevlm/model/vision_projector.py",
"snippet": "def build_vision_projector(config, delay_load=False, **kwargs):\n projector_type = getattr(config, 'mm_projector_type', 'linear')\n\n if projector_type == 'linear':\n return nn.Linear(config.mm_hidden_size, config.hidden_size)\n elif projector_type.startswith('mlp'):\n mlp_gelu_match = re.match(r'^mlp(\\d+)x_gelu$', projector_type)\n if mlp_gelu_match:\n mlp_depth = int(mlp_gelu_match.group(1))\n modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]\n for _ in range(1, mlp_depth):\n modules.append(nn.GELU())\n modules.append(nn.Linear(config.hidden_size, config.hidden_size))\n return nn.Sequential(*modules)\n elif projector_type.startswith('ldpnet'):\n return LDPNetProjector(config)\n raise ValueError(f'Unknown projector type: {projector_type}')"
},
{
"identifier": "IGNORE_INDEX",
"path": "mobilevlm/constants.py",
"snippet": "IGNORE_INDEX = -100"
},
{
"identifier": "IMAGE_TOKEN_INDEX",
"path": "mobilevlm/constants.py",
"snippet": "IMAGE_TOKEN_INDEX = -200"
},
{
"identifier": "DEFAULT_IMAGE_PATCH_TOKEN",
"path": "mobilevlm/constants.py",
"snippet": "DEFAULT_IMAGE_PATCH_TOKEN = \"<im_patch>\""
},
{
"identifier": "DEFAULT_IM_START_TOKEN",
"path": "mobilevlm/constants.py",
"snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\""
},
{
"identifier": "DEFAULT_IM_END_TOKEN",
"path": "mobilevlm/constants.py",
"snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\""
}
] | import torch
import torch.nn as nn
from abc import ABC, abstractmethod
from transformers import AutoTokenizer, BitsAndBytesConfig
from mobilevlm.model.vision_encoder import build_vision_tower
from mobilevlm.model.vision_projector import build_vision_projector
from mobilevlm.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, \
DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from mobilevlm.model.mobilellama import MobileLlamaForCausalLM | 1,423 |
class MobileVLMMetaModel:
def __init__(self, config):
super(MobileVLMMetaModel, self).__init__(config)
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=False)
self.mm_projector = build_vision_projector(config)
def get_vision_tower(self):
vision_tower = getattr(self, 'vision_tower', None)
if type(vision_tower) is list:
vision_tower = vision_tower[0]
return vision_tower
def initialize_vision_modules(self, model_args, fsdp=None):
mm_vision_select_layer = model_args.mm_vision_select_layer
mm_vision_select_feature = model_args.mm_vision_select_feature
pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
self.config.mm_vision_tower = model_args.vision_tower
self.config.use_mm_proj = True
self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
self.config.mm_vision_select_layer = mm_vision_select_layer
self.config.mm_vision_select_feature = mm_vision_select_feature
# Build VisionTower
vision_tower = build_vision_tower(model_args)
if fsdp is not None and len(fsdp) > 0:
self.vision_tower = [vision_tower]
else:
self.vision_tower = vision_tower
self.config.mm_hidden_size = vision_tower.hidden_size
# Build Vision-Projector
self.mm_projector = build_vision_projector(self.config)
if pretrain_mm_mlp_adapter is not None:
mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
def get_w(weights, keyword):
return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}
self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
class MobileVLMMetaForCausalLM(ABC):
@abstractmethod
def get_model(self):
pass
def get_vision_tower(self):
return self.get_model().get_vision_tower()
def encode_images(self, images):
image_features = self.get_model().get_vision_tower()(images)
image_features = self.get_model().mm_projector(image_features)
return image_features
def prepare_inputs_labels_for_multimodal(
self, input_ids, attention_mask, past_key_values, labels, images
):
vision_tower = self.get_vision_tower()
if vision_tower is None or images is None or input_ids.shape[1] == 1:
if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:
attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
return input_ids, attention_mask, past_key_values, None, labels
if type(images) is list or images.ndim == 5:
concat_images = torch.cat([image for image in images], dim=0)
image_features = self.encode_images(concat_images)
split_sizes = [image.shape[0] for image in images]
image_features = torch.split(image_features, split_sizes, dim=0)
image_features = [x.flatten(0, 1) for x in image_features]
else:
image_features = self.encode_images(images)
new_input_embeds = []
new_labels = [] if labels is not None else None
cur_image_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids):
|
class MobileVLMMetaModel:
def __init__(self, config):
super(MobileVLMMetaModel, self).__init__(config)
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=False)
self.mm_projector = build_vision_projector(config)
def get_vision_tower(self):
vision_tower = getattr(self, 'vision_tower', None)
if type(vision_tower) is list:
vision_tower = vision_tower[0]
return vision_tower
def initialize_vision_modules(self, model_args, fsdp=None):
mm_vision_select_layer = model_args.mm_vision_select_layer
mm_vision_select_feature = model_args.mm_vision_select_feature
pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
self.config.mm_vision_tower = model_args.vision_tower
self.config.use_mm_proj = True
self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
self.config.mm_vision_select_layer = mm_vision_select_layer
self.config.mm_vision_select_feature = mm_vision_select_feature
# Build VisionTower
vision_tower = build_vision_tower(model_args)
if fsdp is not None and len(fsdp) > 0:
self.vision_tower = [vision_tower]
else:
self.vision_tower = vision_tower
self.config.mm_hidden_size = vision_tower.hidden_size
# Build Vision-Projector
self.mm_projector = build_vision_projector(self.config)
if pretrain_mm_mlp_adapter is not None:
mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
def get_w(weights, keyword):
return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}
self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
class MobileVLMMetaForCausalLM(ABC):
@abstractmethod
def get_model(self):
pass
def get_vision_tower(self):
return self.get_model().get_vision_tower()
def encode_images(self, images):
image_features = self.get_model().get_vision_tower()(images)
image_features = self.get_model().mm_projector(image_features)
return image_features
def prepare_inputs_labels_for_multimodal(
self, input_ids, attention_mask, past_key_values, labels, images
):
vision_tower = self.get_vision_tower()
if vision_tower is None or images is None or input_ids.shape[1] == 1:
if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:
attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
return input_ids, attention_mask, past_key_values, None, labels
if type(images) is list or images.ndim == 5:
concat_images = torch.cat([image for image in images], dim=0)
image_features = self.encode_images(concat_images)
split_sizes = [image.shape[0] for image in images]
image_features = torch.split(image_features, split_sizes, dim=0)
image_features = [x.flatten(0, 1) for x in image_features]
else:
image_features = self.encode_images(images)
new_input_embeds = []
new_labels = [] if labels is not None else None
cur_image_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids): | if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0: | 3 | 2023-12-29 03:35:49+00:00 | 2k |
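Row 5 ends just before the per-sample loop body; the gold next line checks whether `cur_input_ids` contains any `IMAGE_TOKEN_INDEX` (-200) placeholder. A simplified, assumed sketch of the splice that typically follows in LLaVA-style code (single image token per sample; `embed_tokens` is the language model's embedding layer):

```python
import torch

IMAGE_TOKEN_INDEX = -200  # from mobilevlm/constants.py

def splice_image_features(cur_input_ids, cur_image_features, embed_tokens):
    """Replace the IMAGE_TOKEN_INDEX placeholder with projected image features."""
    positions = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
    if positions.numel() == 0:
        return embed_tokens(cur_input_ids)       # text-only sample
    p = positions[0].item()
    return torch.cat([
        embed_tokens(cur_input_ids[:p]),         # text before the image slot
        cur_image_features,                      # vision features after mm_projector
        embed_tokens(cur_input_ids[p + 1:]),     # text after the image slot
    ], dim=0)
```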
kinggongzilla/ai-clone-whatsapp | utils/config_utils.py | [
{
"identifier": "datasets",
"path": "configs/datasets.py",
"snippet": "class custom_dataset:"
},
{
"identifier": "lora_config",
"path": "configs/peft.py",
"snippet": "class lora_config:\n r: int=8\n lora_alpha: int=32\n target_modules: List[str] = field(default_factory=lambda: [\"q_proj\", \"v_proj\"])\n bias= \"none\"\n task_type: str= \"CAUSAL_LM\"\n lora_dropout: float=0.05\n inference_mode: bool = False"
},
{
"identifier": "llama_adapter_config",
"path": "configs/peft.py",
"snippet": "class llama_adapter_config:\n adapter_len: int= 10\n adapter_layers: int= 30\n task_type: str= \"CAUSAL_LM\""
},
{
"identifier": "prefix_config",
"path": "configs/peft.py",
"snippet": "class prefix_config:\n num_virtual_tokens: int=30\n task_type: str= \"CAUSAL_LM\" "
},
{
"identifier": "train_config",
"path": "configs/training.py",
"snippet": "class train_config:\n whatsapp_username: str=\"\" # your own whatsapp user name as it is in the chat .txt files\n model_name: str=\"mistralai/Mistral-7B-Instruct-v0.2\"\n enable_fsdp: bool=False\n low_cpu_fsdp: bool=False\n run_validation: bool=False\n batch_size_training: int=1\n batching_strategy: str=\"packing\" #alternative: padding\n context_length: int=4096\n gradient_accumulation_steps: int=1\n gradient_clipping: bool = False\n gradient_clipping_threshold: float = 1.0\n num_epochs: int=1\n num_workers_dataloader: int=1\n lr: float=1e-4\n weight_decay: float=0.0\n gamma: float= 0.85\n seed: int=42\n use_fp16: bool=True\n mixed_precision: bool=True\n val_batch_size: int=1\n dataset = \"custom_dataset\"\n data_dir: str = \"data/preprocessing/processed_chats\"\n peft_method: str = \"lora\" # None , llama_adapter, prefix\n use_peft: bool=True\n output_dir: str = \"checkpoints\"\n freeze_layers: bool = False\n num_freeze_layers: int = 1\n quantization: bool = True\n one_gpu: bool = False\n save_model: bool = True\n dist_checkpoint_root_folder: str=\"PATH/to/save/FSDP/model\" # will be used if using FSDP\n dist_checkpoint_folder: str=\"fine-tuned\" # will be used if using FSDP\n save_optimizer: bool=False # will be used if using FSDP\n use_fast_kernels: bool = False # Enable using SDPA from PyTroch Accelerated Transformers, make use Flash Attention and Xformer memory-efficient kernels"
},
{
"identifier": "LengthBasedBatchSampler",
"path": "data/sampler.py",
"snippet": "class LengthBasedBatchSampler(torch.utils.data.BatchSampler):\n def __init__(self, data_source, batch_size: int, drop_last: bool, shuffle: bool=True) -> None:\n if isinstance(next(iter(data_source)), dict):\n first_key = next(iter(next(iter(data_source)).keys()))\n self.lengths = [len(d[first_key]) for d in data_source]\n else:\n self.lengths = [len(d) for d in data_source]\n self.batch_size = batch_size\n self.drop_last = drop_last\n self.shuffle = shuffle\n\n def __iter__(self):\n ids = np.argsort(self.lengths)\n if self.drop_last:\n ids = ids[:len(ids) // self.batch_size * self.batch_size]\n\n batches = [ids[i:i+self.batch_size] for i in range(0, len(ids), self.batch_size)]\n\n if self.shuffle:\n random.shuffle(batches)\n\n for b in batches:\n yield b\n\n def __len__(self):\n if self.drop_last:\n return len(self.lengths) // self.batch_size\n else:\n return len(self.lengths) // self.batch_size + (len(self.lengths) % self.batch_size > 0)"
},
{
"identifier": "DistributedLengthBasedBatchSampler",
"path": "data/sampler.py",
"snippet": "class DistributedLengthBasedBatchSampler(torch.utils.data.BatchSampler):\n def __init__(self, data_source, batch_size: int, num_replicas: int, rank: int, shuffle: bool = True, seed: int = 0) -> None:\n random.seed(seed)\n self.batch_sampler = LengthBasedBatchSampler(\n data_source, batch_size=batch_size, drop_last=True, shuffle=shuffle\n )\n self.num_replicas = num_replicas\n self.rank = rank\n \n def __iter__(self):\n max_length = len(self.batch_sampler) // self.num_replicas * self.num_replicas\n return islice(self.batch_sampler, self.rank, max_length, self.num_replicas)\n \n def __len__(self):\n return len(self.batch_sampler) // self.num_replicas"
},
{
"identifier": "DATASET_PREPROC",
"path": "utils/dataset_utils.py",
"snippet": "DATASET_PREPROC = {\n \"custom_dataset\": get_custom_dataset,\n}"
}
] | import inspect
import torch.distributed as dist
from dataclasses import asdict
from torch.utils.data import DistributedSampler
from peft import (
LoraConfig,
AdaptionPromptConfig,
PrefixTuningConfig,
)
from transformers import default_data_collator
from transformers.data import DataCollatorForSeq2Seq
from configs import datasets, lora_config, llama_adapter_config, prefix_config, train_config
from data.sampler import LengthBasedBatchSampler, DistributedLengthBasedBatchSampler
from utils.dataset_utils import DATASET_PREPROC | 1,507 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
def update_config(config, **kwargs):
if isinstance(config, (tuple, list)):
for c in config:
update_config(c, **kwargs)
else:
for k, v in kwargs.items():
if hasattr(config, k):
setattr(config, k, v)
elif "." in k:
# allow --some_config.some_param=True
config_name, param_name = k.split(".")
if type(config).__name__ == config_name:
if hasattr(config, param_name):
setattr(config, param_name, v)
else:
# In case of specialized config we can warm user
print(f"Warning: {config_name} does not accept parameter: {k}")
elif isinstance(config, train_config):
print(f"Warning: unknown parameter {k}")
def generate_peft_config(train_config, kwargs):
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
def update_config(config, **kwargs):
if isinstance(config, (tuple, list)):
for c in config:
update_config(c, **kwargs)
else:
for k, v in kwargs.items():
if hasattr(config, k):
setattr(config, k, v)
elif "." in k:
# allow --some_config.some_param=True
config_name, param_name = k.split(".")
if type(config).__name__ == config_name:
if hasattr(config, param_name):
setattr(config, param_name, v)
else:
# In case of specialized config we can warm user
print(f"Warning: {config_name} does not accept parameter: {k}")
elif isinstance(config, train_config):
print(f"Warning: unknown parameter {k}")
def generate_peft_config(train_config, kwargs): | configs = (lora_config, llama_adapter_config, prefix_config) | 1 | 2023-12-28 00:02:08+00:00 | 2k |
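Row 6's gold next line starts a tuple of dataclass configs. In llama-recipes-derived code, `generate_peft_config` usually continues roughly as below, using the imports shown in the row (a sketch, not guaranteed to match this repo byte-for-byte):

```python
def generate_peft_config(train_config, kwargs):
    configs = (lora_config, llama_adapter_config, prefix_config)   # gold next line
    peft_configs = (LoraConfig, AdaptionPromptConfig, PrefixTuningConfig)
    # yields "lora", "llama_adapter", "prefix" for the classes above
    names = tuple(c.__name__.rstrip("_config") for c in configs)

    assert train_config.peft_method in names, f"Peft config not found: {train_config.peft_method}"

    idx = names.index(train_config.peft_method)
    config = configs[idx]()
    update_config(config, **kwargs)      # apply CLI overrides such as --lora_config.r=16
    params = asdict(config)              # dataclass -> plain kwargs dict
    return peft_configs[idx](**params)   # instantiate the matching peft-library config
```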
FoundationVision/UniRef | projects/UniRef/uniref/models/deformable_detr/matcher.py | [
{
"identifier": "box_cxcywh_to_xyxy",
"path": "projects/UniRef/uniref/util/box_ops.py",
"snippet": "def box_cxcywh_to_xyxy(x):\n # print('box:\\n', x)\n\n x_c, y_c, w, h = x.unbind(-1)\n b = [(x_c - 0.5 * w), (y_c - 0.5 * h),\n (x_c + 0.5 * w), (y_c + 0.5 * h)]\n return torch.stack(b, dim=-1)"
},
{
"identifier": "generalized_box_iou",
"path": "projects/UniRef/uniref/util/box_ops.py",
"snippet": "def generalized_box_iou(boxes1, boxes2):\n \"\"\"\n Generalized IoU from https://giou.stanford.edu/\n\n The boxes should be in [x0, y0, x1, y1] format\n\n Returns a [N, M] pairwise matrix, where N = len(boxes1)\n and M = len(boxes2)\n \"\"\"\n # degenerate boxes gives inf / nan results\n # so do an early check\n\n assert (boxes1[:, 2:] >= boxes1[:, :2]).all()\n assert (boxes2[:, 2:] >= boxes2[:, :2]).all()\n iou, union = box_iou(boxes1, boxes2)\n\n lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])\n rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])\n\n wh = (rb - lt).clamp(min=0) # [N,M,2]\n area = wh[:, :, 0] * wh[:, :, 1]\n\n return iou - (area - union) / (area+1e-7)"
}
] | import torch
import torch.nn.functional as F
import torchvision.ops as ops
from scipy.optimize import linear_sum_assignment
from torch import nn
from ...util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou | 1,206 | # ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
class HungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(self,
cost_class: float = 1,
cost_bbox: float = 1,
cost_giou: float = 1):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0"
def forward_ota(self, outputs, targets):
""" simOTA for detr
"""
with torch.no_grad():
bs, num_queries = outputs["pred_logits"].shape[:2]
out_prob = outputs["pred_logits"].sigmoid()
out_bbox = outputs["pred_boxes"] # 跳过frame 维度
indices = []
matched_ids = []
for batch_idx in range(bs):
bz_boxes = out_bbox[batch_idx] #[300,4]
bz_out_prob = out_prob[batch_idx]
bz_tgt_ids = targets[batch_idx]["labels"]
num_insts = len(bz_tgt_ids)
bz_gtboxs = targets[batch_idx]['boxes'].reshape(num_insts,4) #[num_gt, 4]
fg_mask, is_in_boxes_and_center = \
self.get_in_boxes_info(bz_boxes,bz_gtboxs,expanded_strides=32)
pair_wise_ious = ops.box_iou(box_cxcywh_to_xyxy(bz_boxes), box_cxcywh_to_xyxy(bz_gtboxs))
# pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8)
# Compute the classification cost.
alpha = 0.25
gamma = 2.0
neg_cost_class = (1 - alpha) * (bz_out_prob ** gamma) * (-(1 - bz_out_prob + 1e-8).log())
pos_cost_class = alpha * ((1 - bz_out_prob) ** gamma) * (-(bz_out_prob + 1e-8).log())
cost_class = pos_cost_class[:, bz_tgt_ids] - neg_cost_class[:, bz_tgt_ids]
| # ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
class HungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(self,
cost_class: float = 1,
cost_bbox: float = 1,
cost_giou: float = 1):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0"
def forward_ota(self, outputs, targets):
""" simOTA for detr
"""
with torch.no_grad():
bs, num_queries = outputs["pred_logits"].shape[:2]
out_prob = outputs["pred_logits"].sigmoid()
out_bbox = outputs["pred_boxes"] # 跳过frame 维度
indices = []
matched_ids = []
for batch_idx in range(bs):
bz_boxes = out_bbox[batch_idx] #[300,4]
bz_out_prob = out_prob[batch_idx]
bz_tgt_ids = targets[batch_idx]["labels"]
num_insts = len(bz_tgt_ids)
bz_gtboxs = targets[batch_idx]['boxes'].reshape(num_insts,4) #[num_gt, 4]
fg_mask, is_in_boxes_and_center = \
self.get_in_boxes_info(bz_boxes,bz_gtboxs,expanded_strides=32)
pair_wise_ious = ops.box_iou(box_cxcywh_to_xyxy(bz_boxes), box_cxcywh_to_xyxy(bz_gtboxs))
# pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8)
# Compute the classification cost.
alpha = 0.25
gamma = 2.0
neg_cost_class = (1 - alpha) * (bz_out_prob ** gamma) * (-(1 - bz_out_prob + 1e-8).log())
pos_cost_class = alpha * ((1 - bz_out_prob) ** gamma) * (-(bz_out_prob + 1e-8).log())
cost_class = pos_cost_class[:, bz_tgt_ids] - neg_cost_class[:, bz_tgt_ids] | cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(bz_boxes), box_cxcywh_to_xyxy(bz_gtboxs)) | 1 | 2023-12-22 13:31:33+00:00 | 2k |
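Row 7's gold next line negates `generalized_box_iou` to turn overlap into a cost. Outside the simOTA branch, a Hungarian matcher combines the class, box, and GIoU costs into one matrix and solves the assignment; a self-contained sketch of that standard step (generic DETR logic, not the verbatim UniRef code):

```python
import torch
from scipy.optimize import linear_sum_assignment

@torch.no_grad()
def hungarian_match(cost_class, cost_bbox, cost_giou,
                    w_class=1.0, w_bbox=1.0, w_giou=1.0):
    """All inputs are [num_queries, num_gt] cost matrices; lower is better."""
    C = w_class * cost_class + w_bbox * cost_bbox + w_giou * cost_giou
    row, col = linear_sum_assignment(C.cpu().numpy())
    return (torch.as_tensor(row, dtype=torch.int64),   # matched query indices
            torch.as_tensor(col, dtype=torch.int64))   # matched target indices
```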
xhuangcv/humannorm | threestudio/models/materials/neural_radiance_material.py | [
{
"identifier": "BaseMaterial",
"path": "threestudio/models/materials/base.py",
"snippet": "class BaseMaterial(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n requires_normal: bool = False\n requires_tangent: bool = False\n\n def configure(self):\n pass\n\n def forward(self, *args, **kwargs) -> Float[Tensor, \"*B 3\"]:\n raise NotImplementedError\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}"
},
{
"identifier": "get_encoding",
"path": "threestudio/models/networks.py",
"snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding"
},
{
"identifier": "get_mlp",
"path": "threestudio/models/networks.py",
"snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network"
},
{
"identifier": "dot",
"path": "threestudio/utils/ops.py",
"snippet": "def dot(x, y):\n return torch.sum(x * y, -1, keepdim=True)"
},
{
"identifier": "get_activation",
"path": "threestudio/utils/ops.py",
"snippet": "def get_activation(name) -> Callable:\n if name is None:\n return lambda x: x\n name = name.lower()\n if name == \"none\":\n return lambda x: x\n elif name == \"lin2srgb\":\n return lambda x: torch.where(\n x > 0.0031308,\n torch.pow(torch.clamp(x, min=0.0031308), 1.0 / 2.4) * 1.055 - 0.055,\n 12.92 * x,\n ).clamp(0.0, 1.0)\n elif name == \"exp\":\n return lambda x: torch.exp(x)\n elif name == \"shifted_exp\":\n return lambda x: torch.exp(x - 1.0)\n elif name == \"trunc_exp\":\n return trunc_exp\n elif name == \"shifted_trunc_exp\":\n return lambda x: trunc_exp(x - 1.0)\n elif name == \"sigmoid\":\n return lambda x: torch.sigmoid(x)\n elif name == \"tanh\":\n return lambda x: torch.tanh(x)\n elif name == \"shifted_softplus\":\n return lambda x: F.softplus(x - 1.0)\n elif name == \"scale_-11_01\":\n return lambda x: x * 0.5 + 0.5\n else:\n try:\n return getattr(F, name)\n except AttributeError:\n raise ValueError(f\"Unknown activation function: {name}\")"
}
] | import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import dot, get_activation
from threestudio.utils.typing import * | 1,149 |
@threestudio.register("neural-radiance-material")
class NeuralRadianceMaterial(BaseMaterial):
@dataclass
class Config(BaseMaterial.Config):
input_feature_dims: int = 8
color_activation: str = "sigmoid"
dir_encoding_config: dict = field(
default_factory=lambda: {"otype": "SphericalHarmonics", "degree": 3}
)
mlp_network_config: dict = field(
default_factory=lambda: {
"otype": "FullyFusedMLP",
"activation": "ReLU",
"n_neurons": 16,
"n_hidden_layers": 2,
}
)
cfg: Config
def configure(self) -> None:
|
@threestudio.register("neural-radiance-material")
class NeuralRadianceMaterial(BaseMaterial):
@dataclass
class Config(BaseMaterial.Config):
input_feature_dims: int = 8
color_activation: str = "sigmoid"
dir_encoding_config: dict = field(
default_factory=lambda: {"otype": "SphericalHarmonics", "degree": 3}
)
mlp_network_config: dict = field(
default_factory=lambda: {
"otype": "FullyFusedMLP",
"activation": "ReLU",
"n_neurons": 16,
"n_hidden_layers": 2,
}
)
cfg: Config
def configure(self) -> None: | self.encoding = get_encoding(3, self.cfg.dir_encoding_config) | 1 | 2023-12-23 12:37:48+00:00 | 2k |
jianchang512/stt | start.py | [
{
"identifier": "cfg",
"path": "stslib/cfg.py",
"snippet": "LANG = \"en\" if locale.getdefaultlocale()[0].split('_')[0].lower() != 'zh' else \"zh\"\nROOT_DIR = os.getcwd()\nMODEL_DIR = os.path.join(ROOT_DIR, 'models')\nSTATIC_DIR = os.path.join(ROOT_DIR, 'static')\nTMP_DIR = os.path.join(STATIC_DIR, 'tmp')"
},
{
"identifier": "tool",
"path": "stslib/tool.py",
"snippet": "def runffmpeg(arg):\ndef checkupdate():\ndef openweb(web_address):\ndef ms_to_time_string(*, ms=0, seconds=None):"
},
{
"identifier": "ROOT_DIR",
"path": "stslib/cfg.py",
"snippet": "ROOT_DIR = os.getcwd()"
}
] | import logging
import re
import threading
import sys
import torch
import os
from flask import Flask, request, render_template, jsonify, send_from_directory
from gevent.pywsgi import WSGIServer, WSGIHandler, LoggingLogAdapter
from logging.handlers import RotatingFileHandler
from stslib import cfg, tool
from stslib.cfg import ROOT_DIR
from faster_whisper import WhisperModel | 836 |
device = "cuda" if torch.cuda.is_available() else "cpu"
class CustomRequestHandler(WSGIHandler):
def log_request(self):
pass
# Configure logging
# Disable Werkzeug's default log handlers
log = logging.getLogger('werkzeug')
log.handlers[:] = []
log.setLevel(logging.WARNING)
app = Flask(__name__, static_folder=os.path.join(ROOT_DIR, 'static'), static_url_path='/static',
template_folder=os.path.join(ROOT_DIR, 'templates'))
root_log = logging.getLogger() # Flask's root logger
root_log.handlers = []
root_log.setLevel(logging.WARNING)
# Configure logging
app.logger.setLevel(logging.WARNING) # set the log level to WARNING
# Create a RotatingFileHandler with the target file path and size limit
file_handler = RotatingFileHandler(os.path.join(ROOT_DIR, 'sts.log'), maxBytes=1024 * 1024, backupCount=5)
# Define the log format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Set the file handler's level and format
file_handler.setLevel(logging.WARNING)
file_handler.setFormatter(formatter)
# Attach the file handler to the logger
app.logger.addHandler(file_handler)
@app.route('/static/<path:filename>')
def static_files(filename):
return send_from_directory(app.config['STATIC_FOLDER'], filename)
@app.route('/')
def index():
return render_template("index.html",
cuda=cfg.cuda,
lang_code=cfg.lang_code,
language=cfg.LANG,
root_dir=ROOT_DIR.replace('\\', '/'))
# Upload audio
@app.route('/upload', methods=['POST'])
def upload():
try:
# Get the uploaded file
audio_file = request.files['audio']
# if it is an mp4
noextname, ext = os.path.splitext(audio_file.filename)
ext = ext.lower()
# If it is a video, extract the audio first
wav_file = os.path.join(cfg.TMP_DIR, f'{noextname}.wav')
if os.path.exists(wav_file) and os.path.getsize(wav_file) > 0:
return jsonify({'code': 0, 'msg': cfg.transobj['lang1'], "data": os.path.basename(wav_file)})
msg = ""
if ext in ['.mp4', '.mov', '.avi', '.mkv', '.mpeg', '.mp3', '.flac']:
video_file = os.path.join(cfg.TMP_DIR, f'{noextname}{ext}')
audio_file.save(video_file)
params = [
"-i",
video_file,
]
if ext not in ['.mp3', '.flac']:
params.append('-vn')
params.append(wav_file)
|
device = "cuda" if torch.cuda.is_available() else "cpu"
class CustomRequestHandler(WSGIHandler):
def log_request(self):
pass
# Configure logging
# Disable Werkzeug's default log handlers
log = logging.getLogger('werkzeug')
log.handlers[:] = []
log.setLevel(logging.WARNING)
app = Flask(__name__, static_folder=os.path.join(ROOT_DIR, 'static'), static_url_path='/static',
template_folder=os.path.join(ROOT_DIR, 'templates'))
root_log = logging.getLogger() # Flask's root logger
root_log.handlers = []
root_log.setLevel(logging.WARNING)
# Configure logging
app.logger.setLevel(logging.WARNING) # set the log level to WARNING
# Create a RotatingFileHandler with the target file path and size limit
file_handler = RotatingFileHandler(os.path.join(ROOT_DIR, 'sts.log'), maxBytes=1024 * 1024, backupCount=5)
# Define the log format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Set the file handler's level and format
file_handler.setLevel(logging.WARNING)
file_handler.setFormatter(formatter)
# Attach the file handler to the logger
app.logger.addHandler(file_handler)
@app.route('/static/<path:filename>')
def static_files(filename):
return send_from_directory(app.config['STATIC_FOLDER'], filename)
@app.route('/')
def index():
return render_template("index.html",
cuda=cfg.cuda,
lang_code=cfg.lang_code,
language=cfg.LANG,
root_dir=ROOT_DIR.replace('\\', '/'))
# Upload audio
@app.route('/upload', methods=['POST'])
def upload():
try:
# Get the uploaded file
audio_file = request.files['audio']
# if it is an mp4
noextname, ext = os.path.splitext(audio_file.filename)
ext = ext.lower()
# If it is a video, extract the audio first
wav_file = os.path.join(cfg.TMP_DIR, f'{noextname}.wav')
if os.path.exists(wav_file) and os.path.getsize(wav_file) > 0:
return jsonify({'code': 0, 'msg': cfg.transobj['lang1'], "data": os.path.basename(wav_file)})
msg = ""
if ext in ['.mp4', '.mov', '.avi', '.mkv', '.mpeg', '.mp3', '.flac']:
video_file = os.path.join(cfg.TMP_DIR, f'{noextname}{ext}')
audio_file.save(video_file)
params = [
"-i",
video_file,
]
if ext not in ['.mp3', '.flac']:
params.append('-vn')
params.append(wav_file) | rs = tool.runffmpeg(params) | 1 | 2023-12-28 16:02:55+00:00 | 2k |
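Row 9 imports `WhisperModel` and selects a `device`, but the transcription call falls outside the cropped window (the ffmpeg wrapper itself is sketched after row 2). A minimal sketch of how these imports are typically used downstream; the model path and decoding options are illustrative:

```python
# Path under the local models dir is illustrative; compute_type trades accuracy for memory.
model = WhisperModel("models/large-v2", device=device,
                     compute_type="float16" if device == "cuda" else "int8")

segments, info = model.transcribe(wav_file, beam_size=5)  # wav_file from the upload handler
for seg in segments:  # `segments` is a generator; iterating runs the inference
    print(f"[{seg.start:.2f} -> {seg.end:.2f}] {seg.text}")
```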
jesenzhang/ComfyUI_StreamDiffusion | streamdiffusion/pipeline.py | [
{
"identifier": "SimilarImageFilter",
"path": "streamdiffusion/image_filter.py",
"snippet": "class SimilarImageFilter:\n def __init__(self, threshold: float = 0.98, max_skip_frame: float = 10) -> None:\n self.threshold = threshold\n self.prev_tensor = None\n self.cos = torch.nn.CosineSimilarity(dim=0, eps=1e-6)\n self.max_skip_frame = max_skip_frame\n self.skip_count = 0\n\n def __call__(self, x: torch.Tensor) -> Optional[torch.Tensor]:\n if self.prev_tensor is None:\n self.prev_tensor = x.detach().clone()\n return x\n else:\n cos_sim = self.cos(self.prev_tensor.reshape(-1), x.reshape(-1)).item()\n sample = random.uniform(0, 1)\n if self.threshold >= 1:\n skip_prob = 0\n else:\n skip_prob = max(0, 1 - (1 - cos_sim) / (1 - self.threshold))\n\n # not skip frame\n if skip_prob < sample:\n self.prev_tensor = x.detach().clone()\n return x\n # skip frame\n else:\n if self.skip_count > self.max_skip_frame:\n self.skip_count = 0\n self.prev_tensor = x.detach().clone()\n return x\n else:\n self.skip_count += 1\n return None\n\n def set_threshold(self, threshold: float) -> None:\n self.threshold = threshold\n \n def set_max_skip_frame(self, max_skip_frame: float) -> None:\n self.max_skip_frame = max_skip_frame"
},
{
"identifier": "postprocess_image",
"path": "streamdiffusion/image_utils.py",
"snippet": "def postprocess_image(\n image: torch.Tensor,\n output_type: str = \"pil\",\n do_denormalize: Optional[List[bool]] = None,\n) -> Union[torch.Tensor, np.ndarray, PIL.Image.Image]:\n if not isinstance(image, torch.Tensor):\n raise ValueError(\n f\"Input for postprocessing is in incorrect format: {type(image)}. We only support pytorch tensor\"\n )\n\n if output_type == \"latent\":\n return image\n\n do_normalize_flg = True\n if do_denormalize is None:\n do_denormalize = [do_normalize_flg] * image.shape[0]\n\n image = torch.stack(\n [\n denormalize(image[i]) if do_denormalize[i] else image[i]\n for i in range(image.shape[0])\n ]\n )\n\n if output_type == \"pt\":\n return image\n\n image = pt_to_numpy(image)\n\n if output_type == \"np\":\n return image\n\n if output_type == \"pil\":\n return numpy_to_pil(image)"
}
] | import time
import numpy as np
import PIL.Image
import torch
from typing import List, Optional, Union, Any, Dict, Tuple, Literal
from diffusers import LCMScheduler, StableDiffusionPipeline
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import (
retrieve_latents,
)
from .image_filter import SimilarImageFilter
from .image_utils import postprocess_image | 1,162 |
class StreamDiffusion:
def __init__(
self,
pipe: StableDiffusionPipeline,
t_index_list: List[int],
torch_dtype: torch.dtype = torch.float16,
width: int = 512,
height: int = 512,
do_add_noise: bool = True,
use_denoising_batch: bool = True,
frame_buffer_size: int = 1,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
) -> None:
self.device = pipe.device
self.dtype = torch_dtype
self.generator = None
self.height = height
self.width = width
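        # Latent-space dimensions: the VAE downsamples by pipe.vae_scale_factor (typically 8)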
self.latent_height = int(height // pipe.vae_scale_factor)
self.latent_width = int(width // pipe.vae_scale_factor)
self.frame_bff_size = frame_buffer_size
self.denoising_steps_num = len(t_index_list)
self.cfg_type = cfg_type
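        # When denoising in batch, all timesteps are folded into one forward pass;
        # the cfg_type determines how many extra (un)conditioned copies the UNet
        # (e.g. a TensorRT engine) must accept, hence the batch sizes below.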
if use_denoising_batch:
self.batch_size = self.denoising_steps_num * frame_buffer_size
if self.cfg_type == "initialize":
self.trt_unet_batch_size = (
self.denoising_steps_num + 1
) * self.frame_bff_size
elif self.cfg_type == "full":
self.trt_unet_batch_size = (
2 * self.denoising_steps_num * self.frame_bff_size
)
else:
self.trt_unet_batch_size = self.denoising_steps_num * frame_buffer_size
else:
self.trt_unet_batch_size = self.frame_bff_size
self.batch_size = frame_buffer_size
self.t_list = t_index_list
self.do_add_noise = do_add_noise
self.use_denoising_batch = use_denoising_batch
self.similar_image_filter = False
|
class StreamDiffusion:
def __init__(
self,
pipe: StableDiffusionPipeline,
t_index_list: List[int],
torch_dtype: torch.dtype = torch.float16,
width: int = 512,
height: int = 512,
do_add_noise: bool = True,
use_denoising_batch: bool = True,
frame_buffer_size: int = 1,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
) -> None:
self.device = pipe.device
self.dtype = torch_dtype
self.generator = None
self.height = height
self.width = width
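        # Latent-space dimensions: the VAE downsamples by pipe.vae_scale_factor (typically 8)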
self.latent_height = int(height // pipe.vae_scale_factor)
self.latent_width = int(width // pipe.vae_scale_factor)
self.frame_bff_size = frame_buffer_size
self.denoising_steps_num = len(t_index_list)
self.cfg_type = cfg_type
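        # When denoising in batch, all timesteps are folded into one forward pass;
        # the cfg_type determines how many extra (un)conditioned copies the UNet
        # (e.g. a TensorRT engine) must accept, hence the batch sizes below.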
if use_denoising_batch:
self.batch_size = self.denoising_steps_num * frame_buffer_size
if self.cfg_type == "initialize":
self.trt_unet_batch_size = (
self.denoising_steps_num + 1
) * self.frame_bff_size
elif self.cfg_type == "full":
self.trt_unet_batch_size = (
2 * self.denoising_steps_num * self.frame_bff_size
)
else:
self.trt_unet_batch_size = self.denoising_steps_num * frame_buffer_size
else:
self.trt_unet_batch_size = self.frame_bff_size
self.batch_size = frame_buffer_size
self.t_list = t_index_list
self.do_add_noise = do_add_noise
self.use_denoising_batch = use_denoising_batch
self.similar_image_filter = False | self.similar_filter = SimilarImageFilter() | 0 | 2023-12-29 09:00:03+00:00 | 2k |
neobundy/MLX-Stable-Diffusion-WebUI | model_inspector.py | [
{
"identifier": "PathConfig",
"path": "stable_diffusion/config.py",
"snippet": "class DiffuserModelPathConfig:\nclass BaseConfig:\nclass AutoencoderConfig(BaseConfig):\nclass CLIPTextModelConfig(BaseConfig):\nclass UNetConfig(BaseConfig):\nclass DiffusionConfig(BaseConfig):\n def __init__(self, model_path: str = \"./diffuser_models\"):\n def unet_config(self):\n def unet(self):\n def scheduler(self):\n def text_encoder_config(self):\n def text_encoder(self):\n def vae_config(self):\n def vae(self):\n def diffusion_config(self):\n def tokenizer_vocab(self):\n def tokenizer_merges(self):\n def __getitem__(self, key):\n def __setitem__(self, key, value):"
},
{
"identifier": "preload_models_from_safetensor_weights",
"path": "stable_diffusion/model_io.py",
"snippet": "_DEBUG = False\ndef _debug_print(*args, **kwargs):\ndef _from_numpy(x):\ndef map_unet_weights(key, value):\ndef map_clip_text_encoder_weights(key, value):\ndef map_vae_weights(key, value):\ndef _flatten(params):\ndef _load_safetensor_weights(mapper, model, weight_file, float16: bool = False):\ndef _check_key(key: str, part: str):\ndef load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):\ndef load_text_encoder(key: str = _DEFAULT_MODEL, float16: bool = False):\ndef load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False):\ndef load_diffusion_config(key: str = _DEFAULT_MODEL):\ndef load_tokenizer(key: str = _DEFAULT_MODEL):\ndef load_unet_local(weights_path: str, config_path: str, float16: bool = False):\ndef load_text_encoder_local(weights_path: str, config_path: str, float16: bool = False):\ndef load_autoencoder_local(weights_path: str, config_path: str, float16: bool = False):\ndef load_diffusion_config_local(config_path:str):\ndef load_tokenizer_local(vocab_path: str, merges_path: str):\ndef load_diffuser_model(diffuser_model_path: str, float16: bool = False):"
},
{
"identifier": "_state_dict",
"path": "utils.py",
"snippet": "def _state_dict(model):\n \"\"\"Return the model's state_dict as a dictionary.\"\"\"\n state_dict = {}\n for name, param in model.parameters().items():\n state_dict[name] = param\n return state_dict"
},
{
"identifier": "get_state_dict_from_safetensor",
"path": "utils.py",
"snippet": "def get_state_dict_from_safetensor(checkpoint_path: str):\n \"\"\"Return the state_dict from the checkpoint.\"\"\"\n state_dict = {}\n with safetensor_open(checkpoint_path, framework=\"numpy\") as f:\n # Access the data in the file\n for key in f.keys():\n tensor = f.get_tensor(key)\n state_dict[key] = tensor\n return state_dict"
}
] | from stable_diffusion.config import PathConfig
from stable_diffusion.model_io import preload_models_from_safetensor_weights
from utils import _state_dict
from utils import get_state_dict_from_safetensor | 1,090 |
INSPECTION_FILE = "model_inspection.txt"
NUM_ITEMS = 100
MODEL_FILE = "./models/v2-1_512-ema-pruned.safetensors"
MODEL_FILE1 = "./unet/diffusion_pytorch_model_test.safetensors"
MODEL_FILE2 = "./unet/xxmix9realistic_v40.safetensors"
# Recreate the inspection file at every execution of the script
with open(INSPECTION_FILE, 'w') as f:
pass
def write_to_file(*args, **kwargs):
"""Write the text to the inspection file."""
# Convert the arguments to a string
message = ' '.join(map(str, args))
# Print the message to the console
print(message, **kwargs)
# Open the log file in append mode and write the message
with open(INSPECTION_FILE, 'a') as f:
f.write(message + '\n')
def inspect_model(path_config: PathConfig, keys_only=True):
"""Inspect the contents of the models."""
# Load the models using the provided config and weights paths
    # load_*_local expect (weights_path, config_path); pass the weights file first
    unet_model = load_unet_local(MODEL_FILE, path_config.unet_config)
    text_encoder_model = load_text_encoder_local(MODEL_FILE, path_config.text_encoder_config)
    autoencoder_model = load_autoencoder_local(MODEL_FILE, path_config.vae_config)
diffusion_config = load_diffusion_config_local(path_config.diffusion_config)
tokenizer = load_tokenizer_local(path_config.tokenizer_vocab, path_config.tokenizer_merges)
# Convert the models' state_dict to a dictionary and iterate over it
for model_name, model in zip(["unet", "text_encoder", "autoencoder"], [unet_model, text_encoder_model, autoencoder_model]):
write_to_file("-" * 50)
write_to_file(f"Model: {model_name}")
write_to_file("-" * 50)
|
INSPECTION_FILE = "model_inspection.txt"
NUM_ITEMS = 100
MODEL_FILE = "./models/v2-1_512-ema-pruned.safetensors"
MODEL_FILE1 = "./unet/diffusion_pytorch_model_test.safetensors"
MODEL_FILE2 = "./unet/xxmix9realistic_v40.safetensors"
# Recreate the inspection file at every execution of the script
with open(INSPECTION_FILE, 'w') as f:
pass
def write_to_file(*args, **kwargs):
"""Write the text to the inspection file."""
# Convert the arguments to a string
message = ' '.join(map(str, args))
# Print the message to the console
print(message, **kwargs)
# Open the log file in append mode and write the message
with open(INSPECTION_FILE, 'a') as f:
f.write(message + '\n')
def inspect_model(path_config: PathConfig, keys_only=True):
"""Inspect the contents of the models."""
# Load the models using the provided config and weights paths
    # load_*_local expect (weights_path, config_path); pass the weights file first
    unet_model = load_unet_local(MODEL_FILE, path_config.unet_config)
    text_encoder_model = load_text_encoder_local(MODEL_FILE, path_config.text_encoder_config)
    autoencoder_model = load_autoencoder_local(MODEL_FILE, path_config.vae_config)
diffusion_config = load_diffusion_config_local(path_config.diffusion_config)
tokenizer = load_tokenizer_local(path_config.tokenizer_vocab, path_config.tokenizer_merges)
# Convert the models' state_dict to a dictionary and iterate over it
for model_name, model in zip(["unet", "text_encoder", "autoencoder"], [unet_model, text_encoder_model, autoencoder_model]):
write_to_file("-" * 50)
write_to_file(f"Model: {model_name}")
write_to_file("-" * 50) | for key, value in _state_dict(model).items(): | 2 | 2023-12-25 05:49:34+00:00 | 2k |
ffmemes/ff-backend | src/storage/service.py | [
{
"identifier": "language",
"path": "src/database.py",
"snippet": "DATABASE_URL = str(settings.DATABASE_URL)\nasync def fetch_one(select_query: Select | Insert | Update) -> dict[str, Any] | None:\nasync def fetch_all(select_query: Select | Insert | Update) -> list[dict[str, Any]]:\nasync def execute(select_query: Insert | Update) -> CursorResult:"
},
{
"identifier": "TgChannelPostParsingResult",
"path": "src/storage/parsers/schemas.py",
"snippet": "class TgChannelPostParsingResult(CustomModel):\n post_id: int\n url: str\n content: str | None = None # post text\n media: list[dict] | None = None\n views: int\n date: datetime\n\n mentions: list[str] | None = None # mentioned usernames\n hashtags: list[str] | None = None\n forwarded: dict | None = None\n forwarded_url: str | None = None # url to forwarded post\n link_preview: dict | None = None\n out_links: list[str] | None = None"
},
{
"identifier": "VkGroupPostParsingResult",
"path": "src/storage/parsers/schemas.py",
"snippet": "class VkGroupPostParsingResult(CustomModel):\n post_id: str\n url: str\n content: str | None = None # post text\n media: list[str]\n date: datetime\n views: int\n likes: int\n reposts: int\n comments: int"
},
{
"identifier": "MemeSourceType",
"path": "src/storage/constants.py",
"snippet": "class MemeSourceType(str, Enum):\n TELEGRAM = \"telegram\"\n VK = \"vk\"\n REDDIT = \"reddit\"\n INSTAGRAM = \"instagram\"\n TWITTER = \"twitter\"\n TIKTOK = \"tiktok\"\n USER_UPLOAD = \"user upload\""
},
{
"identifier": "MemeSourceStatus",
"path": "src/storage/constants.py",
"snippet": "class MemeSourceStatus(str, Enum):\n IN_MODERATION = \"in_moderation\"\n PARSING_ENABLED = \"parsing_enabled\"\n PARSING_DISABLED = \"parsing_disabled\""
},
{
"identifier": "MemeType",
"path": "src/storage/constants.py",
"snippet": "class MemeType(str, Enum):\n IMAGE = \"image\"\n ANIMATION = \"animation\"\n VIDEO = \"video\""
},
{
"identifier": "MemeStatus",
"path": "src/storage/constants.py",
"snippet": "class MemeStatus(str, Enum):\n CREATED = \"created\"\n OK = \"ok\"\n DUPLICATE = \"duplicate\"\n AD = \"ad\"\n BROKEN_CONTENT_LINK = \"broken_content_link\"\n \n # TODO: more statuses?\n # IN_MODERATION = \"in_moderation\""
},
{
"identifier": "MEME_RAW_TELEGRAM_MEME_SOURCE_POST_UNIQUE_CONSTRAINT",
"path": "src/storage/constants.py",
"snippet": "MEME_RAW_TELEGRAM_MEME_SOURCE_POST_UNIQUE_CONSTRAINT = \"meme_raw_telegram_meme_source_id_post_id_key\""
},
{
"identifier": "MEME_RAW_VK_MEME_SOURCE_POST_UNIQUE_CONSTRAINT",
"path": "src/storage/constants.py",
"snippet": "MEME_RAW_VK_MEME_SOURCE_POST_UNIQUE_CONSTRAINT = \"meme_raw_vk_meme_source_id_post_id_key\""
}
] | from typing import Any
from datetime import datetime
from sqlalchemy import select, nulls_first, text
from sqlalchemy.dialects.postgresql import insert
from src.database import (
language,
meme,
meme_source,
meme_raw_telegram,
meme_raw_vk,
execute, fetch_one, fetch_all,
)
from src.storage.parsers.schemas import TgChannelPostParsingResult, VkGroupPostParsingResult
from src.storage.constants import (
MemeSourceType,
MemeSourceStatus,
MemeType,
MemeStatus,
MEME_RAW_TELEGRAM_MEME_SOURCE_POST_UNIQUE_CONSTRAINT,
MEME_RAW_VK_MEME_SOURCE_POST_UNIQUE_CONSTRAINT,
) | 1,154 |
async def insert_parsed_posts_from_telegram(
meme_source_id: int,
telegram_posts: list[TgChannelPostParsingResult],
) -> None:
posts = [
post.model_dump() | {"meme_source_id": meme_source_id}
for post in telegram_posts
]
insert_statement = insert(meme_raw_telegram).values(posts)
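    # Upsert: on conflict with the (meme_source_id, post_id) unique constraint, refresh the mutable fields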
insert_posts_query = insert_statement.on_conflict_do_update(
constraint=MEME_RAW_TELEGRAM_MEME_SOURCE_POST_UNIQUE_CONSTRAINT,
set_={
"media": insert_statement.excluded.media,
"views": insert_statement.excluded.views,
"updated_at": datetime.utcnow(),
},
)
await execute(insert_posts_query)
async def insert_parsed_posts_from_vk(
meme_source_id: int,
vk_posts: list[VkGroupPostParsingResult],
) -> None:
posts = [
post.model_dump() | {"meme_source_id": meme_source_id}
for post in vk_posts
]
insert_statement = insert(meme_raw_vk).values(posts)
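    # Same upsert pattern as Telegram, additionally refreshing the engagement counters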
insert_posts_query = insert_statement.on_conflict_do_update(
constraint=MEME_RAW_VK_MEME_SOURCE_POST_UNIQUE_CONSTRAINT,
set_={
"media": insert_statement.excluded.media,
"views": insert_statement.excluded.views,
"likes": insert_statement.excluded.likes,
"reposts": insert_statement.excluded.reposts,
"comments": insert_statement.excluded.comments,
"updated_at": datetime.utcnow(),
},
)
await execute(insert_posts_query)
async def get_telegram_sources_to_parse(limit=10) -> list[dict[str, Any]]:
select_query = (
select(meme_source)
|
async def insert_parsed_posts_from_telegram(
meme_source_id: int,
telegram_posts: list[TgChannelPostParsingResult],
) -> None:
posts = [
post.model_dump() | {"meme_source_id": meme_source_id}
for post in telegram_posts
]
insert_statement = insert(meme_raw_telegram).values(posts)
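    # Upsert: on conflict with the (meme_source_id, post_id) unique constraint, refresh the mutable fields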
insert_posts_query = insert_statement.on_conflict_do_update(
constraint=MEME_RAW_TELEGRAM_MEME_SOURCE_POST_UNIQUE_CONSTRAINT,
set_={
"media": insert_statement.excluded.media,
"views": insert_statement.excluded.views,
"updated_at": datetime.utcnow(),
},
)
await execute(insert_posts_query)
async def insert_parsed_posts_from_vk(
meme_source_id: int,
vk_posts: list[VkGroupPostParsingResult],
) -> None:
posts = [
post.model_dump() | {"meme_source_id": meme_source_id}
for post in vk_posts
]
insert_statement = insert(meme_raw_vk).values(posts)
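    # Same upsert pattern as Telegram, additionally refreshing the engagement counters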
insert_posts_query = insert_statement.on_conflict_do_update(
constraint=MEME_RAW_VK_MEME_SOURCE_POST_UNIQUE_CONSTRAINT,
set_={
"media": insert_statement.excluded.media,
"views": insert_statement.excluded.views,
"likes": insert_statement.excluded.likes,
"reposts": insert_statement.excluded.reposts,
"comments": insert_statement.excluded.comments,
"updated_at": datetime.utcnow(),
},
)
await execute(insert_posts_query)
async def get_telegram_sources_to_parse(limit=10) -> list[dict[str, Any]]:
select_query = (
select(meme_source) | .where(meme_source.c.type == MemeSourceType.TELEGRAM) | 3 | 2023-12-23 12:55:43+00:00 | 2k |
Con6924/SPM | src/configs/prompt.py | [
{
"identifier": "imagenet_templates",
"path": "src/misc/clip_templates.py",
"snippet": ""
},
{
"identifier": "encode_prompts",
"path": "src/engine/train_util.py",
"snippet": "def encode_prompts(\n tokenizer: CLIPTokenizer,\n text_encoder: CLIPTokenizer,\n prompts: list[str],\n return_tokens: bool = False,\n):\n text_tokens = text_tokenize(tokenizer, prompts)\n text_embeddings = text_encode(text_encoder, text_tokens)\n\n if return_tokens:\n return text_embeddings, torch.unique(text_tokens, dim=1)\n return text_embeddings"
}
] | from typing import Literal, Optional, Union
from pathlib import Path
from pydantic import BaseModel, root_validator
from transformers import CLIPTextModel, CLIPTokenizer
from src.misc.clip_templates import imagenet_templates
from src.engine.train_util import encode_prompts
import yaml
import pandas as pd
import random
import torch | 1,147 |
class PromptEmbedsXL:
text_embeds: torch.FloatTensor
pooled_embeds: torch.FloatTensor
def __init__(self, embeds) -> None:
self.text_embeds, self.pooled_embeds = embeds
PROMPT_EMBEDDING = Union[torch.FloatTensor, PromptEmbedsXL]
class PromptEmbedsCache:
prompts: dict[str, PROMPT_EMBEDDING] = {}
def __setitem__(self, __name: str, __value: PROMPT_EMBEDDING) -> None:
self.prompts[__name] = __value
def __getitem__(self, __name: str) -> Optional[PROMPT_EMBEDDING]:
if __name in self.prompts:
return self.prompts[__name]
else:
return None
class PromptSettings(BaseModel): # yaml
target: str
positive: str = None # if None, target will be used
unconditional: str = "" # default is ""
neutral: str = None # if None, unconditional will be used
action: ACTION_TYPES = "erase" # default is "erase"
guidance_scale: float = 1.0 # default is 1.0
resolution: int = 512 # default is 512
dynamic_resolution: bool = False # default is False
batch_size: int = 1 # default is 1
dynamic_crops: bool = False # default is False. only used when model is XL
use_template: bool = False # default is False
la_strength: float = 1000.0
sampling_batch_size: int = 4
seed: int = None
case_number: int = 0
@root_validator(pre=True)
def fill_prompts(cls, values):
keys = values.keys()
if "target" not in keys:
raise ValueError("target must be specified")
if "positive" not in keys:
values["positive"] = values["target"]
if "unconditional" not in keys:
values["unconditional"] = ""
if "neutral" not in keys:
values["neutral"] = values["unconditional"]
return values
class PromptEmbedsPair:
target: PROMPT_EMBEDDING # the concept that do not want to generate
positive: PROMPT_EMBEDDING # generate the concept
unconditional: PROMPT_EMBEDDING # uncondition (default should be empty)
neutral: PROMPT_EMBEDDING # base condition (default should be empty)
use_template: bool = False # use clip template or not
guidance_scale: float
resolution: int
dynamic_resolution: bool
batch_size: int
dynamic_crops: bool
loss_fn: torch.nn.Module
action: ACTION_TYPES
def __init__(
self,
loss_fn: torch.nn.Module,
target: PROMPT_EMBEDDING,
positive: PROMPT_EMBEDDING,
unconditional: PROMPT_EMBEDDING,
neutral: PROMPT_EMBEDDING,
settings: PromptSettings,
) -> None:
self.loss_fn = loss_fn
self.target = target
self.positive = positive
self.unconditional = unconditional
self.neutral = neutral
self.settings = settings
self.use_template = settings.use_template
self.guidance_scale = settings.guidance_scale
self.resolution = settings.resolution
self.dynamic_resolution = settings.dynamic_resolution
self.batch_size = settings.batch_size
self.dynamic_crops = settings.dynamic_crops
self.action = settings.action
self.la_strength = settings.la_strength
self.sampling_batch_size = settings.sampling_batch_size
def _prepare_embeddings(
self,
cache: PromptEmbedsCache,
tokenizer: CLIPTokenizer,
text_encoder: CLIPTextModel,
):
"""
Prepare embeddings for training. When use_template is True, the embeddings will be
        formatted using a template and then processed by the model.
"""
if not self.use_template:
return
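        # Sample a CLIP-style template (e.g. "a photo of a {}.") and fill in the target concept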
template = random.choice(imagenet_templates)
target_prompt = template.format(self.settings.target)
if cache[target_prompt]:
self.target = cache[target_prompt]
else:
|
ACTION_TYPES = Literal[
"erase",
"erase_with_la",
]
class PromptEmbedsXL:
text_embeds: torch.FloatTensor
pooled_embeds: torch.FloatTensor
def __init__(self, embeds) -> None:
self.text_embeds, self.pooled_embeds = embeds
PROMPT_EMBEDDING = Union[torch.FloatTensor, PromptEmbedsXL]
class PromptEmbedsCache:
prompts: dict[str, PROMPT_EMBEDDING] = {}
def __setitem__(self, __name: str, __value: PROMPT_EMBEDDING) -> None:
self.prompts[__name] = __value
def __getitem__(self, __name: str) -> Optional[PROMPT_EMBEDDING]:
if __name in self.prompts:
return self.prompts[__name]
else:
return None
class PromptSettings(BaseModel): # yaml
target: str
positive: str = None # if None, target will be used
unconditional: str = "" # default is ""
neutral: str = None # if None, unconditional will be used
action: ACTION_TYPES = "erase" # default is "erase"
guidance_scale: float = 1.0 # default is 1.0
resolution: int = 512 # default is 512
dynamic_resolution: bool = False # default is False
batch_size: int = 1 # default is 1
dynamic_crops: bool = False # default is False. only used when model is XL
use_template: bool = False # default is False
la_strength: float = 1000.0
sampling_batch_size: int = 4
seed: int = None
case_number: int = 0
@root_validator(pre=True)
def fill_prompts(cls, values):
keys = values.keys()
if "target" not in keys:
raise ValueError("target must be specified")
if "positive" not in keys:
values["positive"] = values["target"]
if "unconditional" not in keys:
values["unconditional"] = ""
if "neutral" not in keys:
values["neutral"] = values["unconditional"]
return values
class PromptEmbedsPair:
target: PROMPT_EMBEDDING # the concept that do not want to generate
positive: PROMPT_EMBEDDING # generate the concept
unconditional: PROMPT_EMBEDDING # uncondition (default should be empty)
neutral: PROMPT_EMBEDDING # base condition (default should be empty)
use_template: bool = False # use clip template or not
guidance_scale: float
resolution: int
dynamic_resolution: bool
batch_size: int
dynamic_crops: bool
loss_fn: torch.nn.Module
action: ACTION_TYPES
def __init__(
self,
loss_fn: torch.nn.Module,
target: PROMPT_EMBEDDING,
positive: PROMPT_EMBEDDING,
unconditional: PROMPT_EMBEDDING,
neutral: PROMPT_EMBEDDING,
settings: PromptSettings,
) -> None:
self.loss_fn = loss_fn
self.target = target
self.positive = positive
self.unconditional = unconditional
self.neutral = neutral
self.settings = settings
self.use_template = settings.use_template
self.guidance_scale = settings.guidance_scale
self.resolution = settings.resolution
self.dynamic_resolution = settings.dynamic_resolution
self.batch_size = settings.batch_size
self.dynamic_crops = settings.dynamic_crops
self.action = settings.action
self.la_strength = settings.la_strength
self.sampling_batch_size = settings.sampling_batch_size
def _prepare_embeddings(
self,
cache: PromptEmbedsCache,
tokenizer: CLIPTokenizer,
text_encoder: CLIPTextModel,
):
"""
Prepare embeddings for training. When use_template is True, the embeddings will be
        formatted using a template and then processed by the model.
"""
if not self.use_template:
return
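        # Sample a CLIP-style template (e.g. "a photo of a {}.") and fill in the target concept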
template = random.choice(imagenet_templates)
target_prompt = template.format(self.settings.target)
if cache[target_prompt]:
self.target = cache[target_prompt]
else: | self.target = encode_prompts(tokenizer, text_encoder, [target_prompt]) | 1 | 2023-12-26 03:19:16+00:00 | 2k |
dakpinaroglu/Frame2seq | frame2seq/utils/score.py | [
{
"identifier": "residue_constants",
"path": "frame2seq/utils/residue_constants.py",
"snippet": "def load_stereo_chemical_props() -> Tuple[Mapping[str, List[Bond]],\n def make_bond_key(atom1_name, atom2_name):\ndef sequence_to_onehot(\n sequence: str,\n mapping: Mapping[str, int],\n) -> np.ndarray:\ndef _make_standard_atom_mask() -> np.ndarray:\ndef _make_rigid_transformation_4x4(ex, ey, translation):\nAA_TO_ID = {\n 'A': 0,\n 'C': 1,\n 'D': 2,\n 'E': 3,\n 'F': 4,\n 'G': 5,\n 'H': 6,\n 'I': 7,\n 'K': 8,\n 'L': 9,\n 'M': 10,\n 'N': 11,\n 'P': 12,\n 'Q': 13,\n 'R': 14,\n 'S': 15,\n 'T': 16,\n 'V': 17,\n 'W': 18,\n 'Y': 19,\n 'X': 20,\n}\nID_TO_AA = {\n 0: 'A',\n 1: 'C',\n 2: 'D',\n 3: 'E',\n 4: 'F',\n 5: 'G',\n 6: 'H',\n 7: 'I',\n 8: 'K',\n 9: 'L',\n 10: 'M',\n 11: 'N',\n 12: 'P',\n 13: 'Q',\n 14: 'R',\n 15: 'S',\n 16: 'T',\n 17: 'V',\n 18: 'W',\n 19: 'Y',\n 20: 'X',\n}\nSTANDARD_ATOM_MASK = _make_standard_atom_mask()"
},
{
"identifier": "get_neg_pll",
"path": "frame2seq/utils/util.py",
"snippet": "def get_neg_pll(probs, seq):\n seq_probs = torch.gather(probs, 1, seq.unsqueeze(-1)).squeeze(-1)\n neg_pll = -1 * torch.log(seq_probs)\n avg_neg_pll = neg_pll.sum().item() / len(neg_pll)\n return neg_pll, avg_neg_pll"
},
{
"identifier": "read_fasta_file",
"path": "frame2seq/utils/util.py",
"snippet": "def read_fasta_file(fasta_file):\n \"\"\"\n Read a fasta file and return a list of sequences.\n \"\"\"\n with open(fasta_file, 'r') as f:\n lines = f.readlines()\n sequences = []\n for line in lines:\n if line[0] == '>':\n sequences.append(lines[lines.index(line) + 1].strip())\n return sequences"
},
{
"identifier": "get_inference_inputs",
"path": "frame2seq/utils/pdb2input.py",
"snippet": "def get_inference_inputs(pdb_file, chain_id):\n atom_positions, aatype, seq_mask = get_parsed_inputs(pdb_file, chain_id)\n seq_mask = seq_mask.unsqueeze(0)\n aatype = torch.from_numpy(aatype)\n aatype = aatype.unsqueeze(0)\n X = atom_positions\n X = X.unsqueeze(0)\n return seq_mask, aatype, X"
},
{
"identifier": "output_csv",
"path": "frame2seq/utils/pred2output.py",
"snippet": "def output_csv(preds, csv_dir):\n \"\"\"\n Given average negative pseudo-log-likelihoods, write to a csv file.\n \"\"\"\n df = pd.DataFrame(columns=[\n 'PDBID', 'Chain ID', 'Sample Number', 'Scored sequence',\n 'Average negative pseudo-log-likelihood', 'Temperature'\n ],\n data=preds)\n df.to_csv(f\"{csv_dir}/scores.csv\", index=False)"
},
{
"identifier": "output_indiv_csv",
"path": "frame2seq/utils/pred2output.py",
"snippet": "def output_indiv_csv(scores, csv_dir):\n \"\"\"\n Given per-residue negative pseudo-log-likelihoods, write to a csv file.\n \"\"\"\n pdbid = scores['pdbid']\n chain = scores['chain']\n sample = scores['sample']\n res_idx = scores['res_idx']\n neg_pll = scores['neg_pll']\n\n df = pd.DataFrame(\n list(zip(res_idx, neg_pll)),\n columns=['Residue index', 'Negative pseudo-log-likelihood'])\n df.to_csv(f\"{csv_dir}/{pdbid}_{chain}_seq{sample}.csv\", index=False)"
}
] | import os
import torch
from tqdm import tqdm
from frame2seq.utils import residue_constants
from frame2seq.utils.util import get_neg_pll, read_fasta_file
from frame2seq.utils.pdb2input import get_inference_inputs
from frame2seq.utils.pred2output import output_csv, output_indiv_csv | 1,471 |
def score(self, pdb_file, chain_id, fasta_file, save_indiv_neg_pll):
temperature = 1.0
seq_mask, aatype, X = get_inference_inputs(pdb_file, chain_id)
seq_mask = seq_mask.to(self.device)
aatype = aatype.to(self.device)
X = X.to(self.device)
str_form = [residue_constants.ID_TO_AA[int(i)] for i in aatype[0]]
input_aatype_onehot = residue_constants.sequence_to_onehot(
sequence=str_form,
mapping=residue_constants.AA_TO_ID,
)
input_aatype_onehot = torch.from_numpy(input_aatype_onehot).float()
input_aatype_onehot = input_aatype_onehot.unsqueeze(0)
input_aatype_onehot = input_aatype_onehot.to(self.device)
input_aatype_onehot = torch.zeros_like(input_aatype_onehot)
input_aatype_onehot[:, :,
20] = 1 # all positions are masked (set to unknown)
scores, preds = {}, []
with torch.no_grad():
pred_seq1 = self.models[0].forward(X, seq_mask, input_aatype_onehot)
pred_seq2 = self.models[1].forward(X, seq_mask, input_aatype_onehot)
pred_seq3 = self.models[2].forward(X, seq_mask, input_aatype_onehot)
pred_seq = (pred_seq1 + pred_seq2 + pred_seq3) / 3 # ensemble
pred_seq = pred_seq / temperature
pred_seq = torch.nn.functional.softmax(pred_seq, dim=-1)
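        # Keep predictions only for residues covered by the structure's sequence mask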
pred_seq = pred_seq[seq_mask]
if fasta_file is not None:
|
def score(self, pdb_file, chain_id, fasta_file, save_indiv_neg_pll):
temperature = 1.0
seq_mask, aatype, X = get_inference_inputs(pdb_file, chain_id)
seq_mask = seq_mask.to(self.device)
aatype = aatype.to(self.device)
X = X.to(self.device)
str_form = [residue_constants.ID_TO_AA[int(i)] for i in aatype[0]]
input_aatype_onehot = residue_constants.sequence_to_onehot(
sequence=str_form,
mapping=residue_constants.AA_TO_ID,
)
input_aatype_onehot = torch.from_numpy(input_aatype_onehot).float()
input_aatype_onehot = input_aatype_onehot.unsqueeze(0)
input_aatype_onehot = input_aatype_onehot.to(self.device)
input_aatype_onehot = torch.zeros_like(input_aatype_onehot)
input_aatype_onehot[:, :,
20] = 1 # all positions are masked (set to unknown)
scores, preds = {}, []
with torch.no_grad():
pred_seq1 = self.models[0].forward(X, seq_mask, input_aatype_onehot)
pred_seq2 = self.models[1].forward(X, seq_mask, input_aatype_onehot)
pred_seq3 = self.models[2].forward(X, seq_mask, input_aatype_onehot)
pred_seq = (pred_seq1 + pred_seq2 + pred_seq3) / 3 # ensemble
pred_seq = pred_seq / temperature
pred_seq = torch.nn.functional.softmax(pred_seq, dim=-1)
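        # Keep predictions only for residues covered by the structure's sequence mask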
pred_seq = pred_seq[seq_mask]
if fasta_file is not None: | input_seqs = read_fasta_file(fasta_file) | 2 | 2023-12-25 09:29:36+00:00 | 2k |
davep/oshit | oshit/app/oshit.py | [
{
"identifier": "load_configuration",
"path": "oshit/app/data/config.py",
"snippet": "@lru_cache(maxsize=None)\ndef load_configuration() -> Configuration:\n \"\"\"Load the configuration.\n\n Returns:\n The configuration.\n\n Note:\n As a side-effect, if the configuration doesn't exist a default one\n will be saved to storage.\n\n This function is designed so that it's safe and low-cost to\n repeatedly call it. The configuration is cached and will only be\n loaded from storage when necessary.\n \"\"\"\n source = configuration_file()\n return (\n Configuration(**loads(source.read_text(encoding=\"utf-8\")))\n if source.exists()\n else save_configuration(Configuration())\n )"
},
{
"identifier": "save_configuration",
"path": "oshit/app/data/config.py",
"snippet": "def save_configuration(configuration: Configuration) -> Configuration:\n \"\"\"Save the given configuration.\n\n Args:\n The configuration to store.\n\n Returns:\n The configuration.\n \"\"\"\n load_configuration.cache_clear()\n configuration_file().write_text(\n dumps(asdict(configuration), indent=4), encoding=\"utf-8\"\n )\n return load_configuration()"
},
{
"identifier": "Main",
"path": "oshit/app/screens/main.py",
"snippet": "class Main(Screen[None]):\n \"\"\"The main screen of the application.\"\"\"\n\n CONTEXT_HELP = \"\"\"\n ## Application keys\n\n | Key | Description |\n | - | - |\n | <kbd>F1</kbd> | This help screen. |\n | <kbd>F2</kbd> | Toggle compact/relaxed display. |\n | <kbd>F3</kbd> | Toggle dark/light mode. |\n | <kbd>F12</kbd> | Quit the application. |\n | <kbd>t</kbd> | View the top stories. |\n | <kbd>n</kbd> | View the new stories. |\n | <kbd>b</kbd> | View the best stories. |\n | <kbd>a</kbd> | View the AskHN stories. |\n | <kbd>s</kbd> | View the ShowHN stories. |\n | <kbd>j</kbd> | View the jobs. |\n \"\"\"\n\n CSS = \"\"\"\n TabbedContent, LoadingIndicator {\n background: $panel;\n }\n \"\"\"\n\n TITLE = f\"Orange Site Hit v{__version__}\"\n\n BINDINGS = [\n Binding(\"f1\", \"help\", \"Help\"),\n Binding(\"f2\", \"compact\", \"Compact/Relaxed\"),\n Binding(\"f3\", \"toggle_dark\"),\n Binding(\"f12\", \"quit\", \"Quit\"),\n Binding(\"t\", \"go('top')\"),\n Binding(\"n\", \"go('new')\"),\n Binding(\"b\", \"go('best')\"),\n Binding(\"a\", \"go('ask')\"),\n Binding(\"s\", \"go('show')\"),\n Binding(\"j\", \"go('jobs')\"),\n Binding(\"down, enter\", \"pane\"),\n ]\n\n def __init__(self) -> None:\n \"\"\"Initialise the screen.\"\"\"\n super().__init__()\n config = load_configuration()\n self._hn = HN(\n max_concurrency=config.maximum_concurrency,\n timeout=config.connection_timeout,\n )\n \"\"\"The HackerNews client object.\"\"\"\n\n def compose(self) -> ComposeResult:\n \"\"\"Compose the main screen's layout.\"\"\"\n yield Header()\n with HackerNews():\n yield Items(\"top\", \"t\", self._hn.top_stories)\n yield Items(\"new\", \"n\", self._hn.new_stories)\n yield Items(\"best\", \"b\", self._hn.best_stories)\n yield Items(\"ask\", \"a\", self._hn.latest_ask_stories)\n yield Items(\"show\", \"s\", self._hn.latest_show_stories)\n yield Items(\"jobs\", \"j\", self._hn.latest_job_stories)\n yield Footer()\n\n def _refresh_subtitle(self) -> None:\n \"\"\"Refresh the subtitle of the screen.\"\"\"\n self.sub_title = self.query_one(HackerNews).description\n\n def on_mount(self) -> None:\n \"\"\"Configure things once the DOM is ready.\"\"\"\n self.set_interval(0.95, self._refresh_subtitle)\n\n def action_help(self) -> None:\n \"\"\"Show the help screen.\"\"\"\n self.app.push_screen(Help(self))\n\n def action_go(self, items: str) -> None:\n \"\"\"Go to the given list of items.\n\n Args:\n items: The name of the list of items to go to.\n \"\"\"\n self.query_one(HackerNews).active = items\n self.query_one(HackerNews).focus_active_pane()\n\n def action_compact(self) -> None:\n \"\"\"Toggle the compact display.\"\"\"\n news = self.query_one(HackerNews)\n news.compact = not news.compact\n\n @on(ShowUser)\n def show_user(self, event: ShowUser) -> None:\n \"\"\"Handle a request to show the details of a user.\"\"\"\n self.app.push_screen(UserDetails(self._hn, event.user))\n\n @on(ShowComments)\n def show_comments(self, event: ShowComments) -> None:\n \"\"\"Handle a request to show the comments for an article.\"\"\"\n self.app.push_screen(Comments(self._hn, event.article))"
}
] | from textual.app import App
from .data import load_configuration, save_configuration
from .screens import Main | 1,359 | """The main application class."""
##############################################################################
# Textual imports.
##############################################################################
# Local imports.
##############################################################################
class OSHit(App[None]):
"""The Orange Site Hit application."""
ENABLE_COMMAND_PALETTE = False
def __init__(self) -> None:
"""Initialise the application."""
super().__init__()
self.dark = load_configuration().dark_mode
def on_mount(self) -> None:
"""Get things going once the app is up and running."""
| """The main application class."""
##############################################################################
# Textual imports.
##############################################################################
# Local imports.
##############################################################################
class OSHit(App[None]):
"""The Orange Site Hit application."""
ENABLE_COMMAND_PALETTE = False
def __init__(self) -> None:
"""Initialise the application."""
super().__init__()
self.dark = load_configuration().dark_mode
def on_mount(self) -> None:
"""Get things going once the app is up and running.""" | self.push_screen(Main()) | 2 | 2023-12-25 14:06:07+00:00 | 2k |
Maximilian-Winter/llama-cpp-agent | src/llama_cpp_agent/agent_memory/memory_tools.py | [
{
"identifier": "LlamaCppFunctionTool",
"path": "src/llama_cpp_agent/function_calling.py",
"snippet": "class LlamaCppFunctionTool:\n def __init__(self, pydantic_model: Type[BaseModel], has_markdown_code_block=False, has_triple_quoted_string=False,\n **additional_parameters):\n self.model = pydantic_model\n self.look_for_field_string = has_markdown_code_block or has_triple_quoted_string\n self.has_markdown_code_block = has_markdown_code_block\n self.has_triple_quoted_string = has_triple_quoted_string\n self.additional_parameters = additional_parameters if additional_parameters else {}\n\n def __call__(self, *args, **kwargs):\n return self.model(**kwargs)"
},
{
"identifier": "CoreMemoryManager",
"path": "src/llama_cpp_agent/agent_memory/core_memory_manager.py",
"snippet": "class CoreMemoryManager:\n def __init__(self, core_memory: dict):\n self.core_memory = core_memory\n\n def add_to_core_memory(self, key: str, child_key: str, value) -> str:\n \"\"\"\n Adds or updates an entry in the core memory.\n \"\"\"\n if key not in self.core_memory:\n self.core_memory[key] = {}\n self.core_memory[key][child_key] = value\n return f\"Core memory updated. Key: {key}, Child Key: {child_key}\"\n\n def replace_in_core_memory(self, key: str, child_key: str, new_value) -> str:\n \"\"\"\n Replaces an existing entry in the core memory.\n \"\"\"\n if key in self.core_memory and child_key in self.core_memory[key]:\n self.core_memory[key][child_key] = new_value\n return f\"Core memory replaced. Key: {key}, Child Key: {child_key}\"\n else:\n return \"Key or child key not found in core memory.\"\n\n def remove_from_core_memory(self, key: str, child_key: str) -> str:\n \"\"\"\n Removes a specific field from a core memory entry.\n \"\"\"\n if key in self.core_memory and child_key in self.core_memory[key]:\n del self.core_memory[key][child_key]\n return f\"Core memory entry removed. Key: {key}, Child Key: {child_key}\"\n else:\n return \"Key or child key not found in core memory.\"\n\n def build_core_memory_context(self):\n output = json.dumps(self.core_memory, indent=4)\n context = f\"# Core-Memory:\\n{output if output != '{}' else 'Empty'}\"\n return context\n\n def load(self, file_path):\n with open(file_path, 'r', encoding='utf-8') as file:\n self.core_memory = json.load(file)\n\n def save(self, file_path):\n with open(file_path, 'w', encoding='utf-8') as file:\n json.dump(self.core_memory, file, indent=4)"
},
{
"identifier": "RetrievalMemoryManager",
"path": "src/llama_cpp_agent/agent_memory/retrieval_memory_manager.py",
"snippet": "class RetrievalMemoryManager:\n def __init__(self, retrieval_memory: RetrievalMemory):\n def add_memory_to_retrieval(self, description: str, importance: float = 1.0) -> str:\n def retrieve_memories(self, query: str, max_results: int = 5) -> str:"
}
] | from pydantic import BaseModel, Field
from ..function_calling import LlamaCppFunctionTool
from .core_memory_manager import CoreMemoryManager
from .retrieval_memory_manager import RetrievalMemoryManager, RetrievalMemory | 1,362 |
class AddCoreMemory(BaseModel):
"""
Add a new entry to the core memory.
"""
key: str = Field(..., description="The key identifier for the core memory entry.")
field: str = Field(..., description="A secondary key or field within the core memory entry.")
value: str = Field(..., description="The value or data to be stored in the specified core memory entry.")
def run(self, core_memory_manager: CoreMemoryManager):
return core_memory_manager.add_to_core_memory(self.key, self.field, self.value)
# Replace Core Memory Model
class ReplaceCoreMemory(BaseModel):
"""
Replace an entry in the core memory.
"""
key: str = Field(..., description="The key identifier for the core memory entry.")
field: str = Field(..., description="The specific field within the core memory entry to be replaced.")
new_value: str = Field(...,
description="The new value to replace the existing data in the specified core memory field.")
def run(self, core_memory_manager: CoreMemoryManager):
        return core_memory_manager.replace_in_core_memory(self.key, self.field, self.new_value)
class RemoveCoreMemory(BaseModel):
"""
Remove an entry in the core memory.
"""
key: str = Field(..., description="The key identifier for the core memory entry to be removed.")
field: str = Field(..., description="The specific field within the core memory entry to be removed.")
def run(self, core_memory_manager: CoreMemoryManager):
return core_memory_manager.remove_from_core_memory(self.key, self.field)
class RetrieveMemories(BaseModel):
"""
Retrieve memories from the retrieval memory based on a query.
"""
query: str = Field(..., description="The query to be used to retrieve memories from the retrieval memory.")
def run(self, retrieval_memory_manager: RetrievalMemoryManager):
return retrieval_memory_manager.retrieve_memories(self.query)
class AddRetrievalMemory(BaseModel):
"""
Add memory to the retrieval memory.
"""
memory: str = Field(..., description="The memory to be added to the retrieval memory.")
importance: float = Field(..., description="The importance of the memory to be added to the retrieval memory.")
def run(self, retrieval_memory_manager: RetrievalMemoryManager):
return retrieval_memory_manager.add_memory_to_retrieval(self.memory, self.importance)
class AgentRetrievalMemory:
def __init__(self, persistent_db_path="./retrieval_memory", embedding_model_name="all-MiniLM-L6-v2",
collection_name="retrieval_memory_collection"):
|
class AddCoreMemory(BaseModel):
"""
Add a new entry to the core memory.
"""
key: str = Field(..., description="The key identifier for the core memory entry.")
field: str = Field(..., description="A secondary key or field within the core memory entry.")
value: str = Field(..., description="The value or data to be stored in the specified core memory entry.")
def run(self, core_memory_manager: CoreMemoryManager):
return core_memory_manager.add_to_core_memory(self.key, self.field, self.value)
# Replace Core Memory Model
class ReplaceCoreMemory(BaseModel):
"""
Replace an entry in the core memory.
"""
key: str = Field(..., description="The key identifier for the core memory entry.")
field: str = Field(..., description="The specific field within the core memory entry to be replaced.")
new_value: str = Field(...,
description="The new value to replace the existing data in the specified core memory field.")
def run(self, core_memory_manager: CoreMemoryManager):
        return core_memory_manager.replace_in_core_memory(self.key, self.field, self.new_value)
class RemoveCoreMemory(BaseModel):
"""
Remove an entry in the core memory.
"""
key: str = Field(..., description="The key identifier for the core memory entry to be removed.")
field: str = Field(..., description="The specific field within the core memory entry to be removed.")
def run(self, core_memory_manager: CoreMemoryManager):
return core_memory_manager.remove_from_core_memory(self.key, self.field)
class RetrieveMemories(BaseModel):
"""
Retrieve memories from the retrieval memory based on a query.
"""
query: str = Field(..., description="The query to be used to retrieve memories from the retrieval memory.")
def run(self, retrieval_memory_manager: RetrievalMemoryManager):
return retrieval_memory_manager.retrieve_memories(self.query)
class AddRetrievalMemory(BaseModel):
"""
Add memory to the retrieval memory.
"""
memory: str = Field(..., description="The memory to be added to the retrieval memory.")
importance: float = Field(..., description="The importance of the memory to be added to the retrieval memory.")
def run(self, retrieval_memory_manager: RetrievalMemoryManager):
return retrieval_memory_manager.add_memory_to_retrieval(self.memory, self.importance)
class AgentRetrievalMemory:
def __init__(self, persistent_db_path="./retrieval_memory", embedding_model_name="all-MiniLM-L6-v2",
collection_name="retrieval_memory_collection"): | self.retrieval_memory = RetrievalMemory(persistent_db_path, embedding_model_name, collection_name) | 2 | 2023-12-29 16:54:39+00:00 | 2k |
tedivm/paracelsus | paracelsus/cli.py | [
{
"identifier": "Dot",
"path": "paracelsus/transformers/dot.py",
"snippet": "class Dot:\n comment_format: str = \"dot\"\n metadata: MetaData\n graph: pydot.Dot\n\n def __init__(self, metaclass: MetaData) -> None:\n self.metadata = metaclass\n self.graph = pydot.Dot(\"database\", graph_type=\"graph\")\n\n for table in self.metadata.tables.values():\n node = pydot.Node(name=table.name)\n node.set_label(self._table_label(table))\n node.set_shape(\"none\")\n node.set_margin(\"0\")\n self.graph.add_node(node)\n for column in table.columns:\n for foreign_key in column.foreign_keys:\n key_parts = foreign_key.target_fullname.split(\".\")\n left_table = key_parts[0]\n left_column = key_parts[1]\n edge = pydot.Edge(left_table, table.name)\n edge.set_label(column.name)\n edge.set_dir(\"both\")\n\n edge.set_arrowhead(\"none\")\n if not column.unique:\n edge.set_arrowhead(\"crow\")\n\n l_column = self.metadata.tables[left_table].columns[left_column]\n edge.set_arrowtail(\"none\")\n if not l_column.unique and not l_column.primary_key:\n edge.set_arrowtail(\"crow\")\n\n self.graph.add_edge(edge)\n\n def _table_label(self, table: Table) -> str:\n column_output = \"\"\n columns = sorted(table.columns, key=utils.column_sort_key)\n for column in columns:\n attributes = set([])\n if column.primary_key:\n attributes.add(\"Primary Key\")\n\n if len(column.foreign_keys) > 0:\n attributes.add(\"Foreign Key\")\n\n if column.unique:\n attributes.add(\"Unique\")\n\n column_output += f' <tr><td align=\"left\">{column.type}</td><td align=\"left\">{column.name}</td><td>{\", \".join(sorted(attributes))}</td></tr>\\n'\n\n return f\"\"\"<\n <table border=\"0\" cellborder=\"1\" cellspacing=\"0\" cellpadding=\"4\">\n <tr><td colspan=\"3\" bgcolor=\"lightblue\"><b>{table.name}</b></td></tr>\n{column_output.rstrip()}\n </table>\n>\"\"\"\n\n def __str__(self) -> str:\n return self.graph.to_string()"
},
{
"identifier": "Mermaid",
"path": "paracelsus/transformers/mermaid.py",
"snippet": "class Mermaid:\n comment_format: str = \"mermaid\"\n metadata: MetaData\n\n def __init__(self, metaclass: MetaData) -> None:\n self.metadata = metaclass\n\n def _table(self, table: Table) -> str:\n output = f\"\\t{table.name}\"\n output += \" {\\n\"\n columns = sorted(table.columns, key=utils.column_sort_key)\n for column in columns:\n output += self._column(column)\n output += \"\\t}\\n\\n\"\n return output\n\n def _column(self, column: Column) -> str:\n column_str = f\"{column.type} {column.name}\"\n\n if column.primary_key:\n if len(column.foreign_keys) > 0:\n column_str += \" PK,FK\"\n else:\n column_str += \" PK\"\n elif len(column.foreign_keys) > 0:\n column_str += \" FK\"\n\n options = []\n\n if column.nullable:\n options.append(\"nullable\")\n\n if column.unique:\n options.append(\"unique\")\n\n if column.index:\n options.append(\"indexed\")\n\n if len(options) > 0:\n column_str += f' \"{\",\".join(options)}\"'\n\n return f\"\\t\\t{column_str}\\n\"\n\n def _relationships(self, column: Column) -> str:\n output = \"\"\n\n column_name = column.name\n right_table = column.table.name\n\n if column.unique:\n right_operand = \"o|\"\n else:\n right_operand = \"o{\"\n\n for foreign_key in column.foreign_keys:\n key_parts = foreign_key.target_fullname.split(\".\")\n left_table = key_parts[0]\n left_column = key_parts[1]\n left_operand = \"\"\n\n lcolumn = self.metadata.tables[left_table].columns[left_column]\n if lcolumn.unique or lcolumn.primary_key:\n left_operand = \"||\"\n else:\n left_operand = \"}o\"\n\n output += f\"\\t{left_table} {left_operand}--{right_operand} {right_table} : {column_name}\\n\"\n return output\n\n def __str__(self) -> str:\n output = \"erDiagram\\n\"\n for table in self.metadata.tables.values():\n output += self._table(table)\n\n for table in self.metadata.tables.values():\n for column in table.columns.values():\n if len(column.foreign_keys) > 0:\n output += self._relationships(column)\n\n return output"
}
] | import importlib
import re
import sys
import typer
from enum import Enum
from pathlib import Path
from typing import List
from typing_extensions import Annotated
from .transformers.dot import Dot
from .transformers.mermaid import Mermaid
from . import _version | 1,289 |
app = typer.Typer()
transformers = {
"mmd": Mermaid,
"mermaid": Mermaid,
|
app = typer.Typer()
transformers = {
"mmd": Mermaid,
"mermaid": Mermaid, | "dot": Dot, | 0 | 2023-12-29 22:13:23+00:00 | 2k |
winniesi/tg-gemini-bot | api/handle.py | [
{
"identifier": "is_authorized",
"path": "api/auth.py",
"snippet": "def is_authorized(from_id: int, user_name: str) -> bool:\n if str(user_name) in ALLOWED_USERS:\n return True\n return False"
},
{
"identifier": "ChatManager",
"path": "api/context.py",
"snippet": "class ChatManager:\n \"\"\"setting up a basic conversation storage manager\"\"\"\n\n def __init__(self):\n self.chats: Dict[str, ChatConversation] = {}\n\n def _new_chat(self, username: str) -> ChatConversation:\n chat = ChatConversation()\n self.chats[username] = chat\n return chat\n\n def get_chat(self, username: str) -> ChatConversation:\n if self.chats.get(username) is None:\n return self._new_chat(username)\n return self.chats[username]"
},
{
"identifier": "ImageChatManger",
"path": "api/context.py",
"snippet": "class ImageChatManger:\n def __init__(self, prompt, file_id: str) -> None:\n self.prompt = prompt\n self.file_id = file_id\n\n def tel_photo_url(self) -> str:\n \"\"\"process telegram photo url\"\"\"\n r_file_id = requests.get(\n f\"https://api.telegram.org/bot{BOT_TOKEN}/getFile?file_id={self.file_id}\"\n )\n file_path = r_file_id.json().get(\"result\").get(\"file_path\")\n download_url = f\"https://api.telegram.org/file/bot{BOT_TOKEN}/{file_path}\"\n return download_url\n\n def photo_bytes(self) -> BytesIO:\n \"\"\"get photo bytes\"\"\"\n photo_url = self.tel_photo_url()\n response = requests.get(photo_url)\n photo_bytes = BytesIO(response.content)\n return photo_bytes\n\n def send_image(self) -> str:\n response = generate_text_with_image(self.prompt, self.photo_bytes())\n return response"
},
{
"identifier": "Update",
"path": "api/telegram.py",
"snippet": "class Update:\n def __init__(self, update: Dict) -> None:\n self.update = update\n self.from_id = update[\"message\"][\"from\"][\"id\"]\n self.type = self._type()\n self.text = self._text()\n self.photo_caption = self._photo_caption()\n self.file_id = self._file_id()\n self.user_name = update[\"message\"][\"from\"][\"username\"]\n\n def _type(self):\n if \"text\" in self.update[\"message\"]:\n return \"text\"\n elif \"photo\" in self.update[\"message\"]:\n return \"photo\"\n else:\n return \"\"\n\n def _photo_caption(self):\n if self.type == \"photo\":\n return self.update[\"message\"].get(\"caption\", \"describe the photo\")\n return \"\"\n\n def _text(self):\n if self.type == \"text\":\n return self.update[\"message\"][\"text\"]\n return \"\"\n\n def _file_id(self):\n if self.type == \"photo\":\n return self.update[\"message\"][\"photo\"][0][\"file_id\"]\n return \"\""
},
{
"identifier": "send_message",
"path": "api/telegram.py",
"snippet": "def send_message(chat_id, text):\n \"\"\"send text message\"\"\"\n payload = {\n \"chat_id\": chat_id,\n \"text\": escape(text),\n \"parse_mode\": \"MarkdownV2\",\n }\n r = requests.post(f\"{TELEGRAM_API}/sendMessage\", data=payload)\n print(f\"Sent message: {text} to {chat_id}\")\n return r"
}
] | from .auth import is_authorized
from .context import ChatManager, ImageChatManger
from .telegram import Update, send_message | 971 | """
All the chat that comes through the Telegram bot gets passed to the
handle_message function. This function checks out if the user has the
green light to chat with the bot. Once that's sorted, it figures out if
the user sent words or an image and deals with it accordingly.
For text messages, it fires up the ChatManager class that keeps track of
the back-and-forth with that user.
As for images, in Gemini pro, they're context-free, so you can handle
them pretty straight-up without much fuss.
"""
chat_manager = ChatManager()
def handle_message(update_data):
update = Update(update_data)
authorized = is_authorized(update.from_id, update.user_name)
if not authorized:
| """
All the chat that comes through the Telegram bot gets passed to the
handle_message function. This function checks out if the user has the
green light to chat with the bot. Once that's sorted, it figures out if
the user sent words or an image and deals with it accordingly.
For text messages, it fires up the ChatManager class that keeps track of
the back-and-forth with that user.
As for images, in Gemini pro, they're context-free, so you can handle
them pretty straight-up without much fuss.
"""
chat_manager = ChatManager()
def handle_message(update_data):
update = Update(update_data)
authorized = is_authorized(update.from_id, update.user_name)
if not authorized: | send_message(update.from_id, "😫 You are not allowed to use this bot.") | 4 | 2023-12-25 03:27:43+00:00 | 2k |
usail-hkust/LLMTSCS | run_advanced_maxpressure.py | [
{
"identifier": "oneline_wrapper",
"path": "utils/utils.py",
"snippet": "def oneline_wrapper(dic_agent_conf, dic_traffic_env_conf, dic_path, roadnet, trafficflow):\n results_table = []\n all_rewards = []\n all_queue_len = []\n all_travel_time = []\n for i in range(1):\n dic_path[\"PATH_TO_MODEL\"] = (dic_path[\"PATH_TO_MODEL\"].split(\".\")[0] + \".json\" +\n time.strftime('%m_%d_%H_%M_%S', time.localtime(time.time())))\n dic_path[\"PATH_TO_WORK_DIRECTORY\"] = (dic_path[\"PATH_TO_WORK_DIRECTORY\"].split(\".\")[0] + \".json\" +\n time.strftime('%m_%d_%H_%M_%S', time.localtime(time.time())))\n oneline = OneLine(dic_agent_conf=dic_agent_conf,\n dic_traffic_env_conf=merge(config.dic_traffic_env_conf, dic_traffic_env_conf),\n dic_path=merge(config.DIC_PATH, dic_path),\n roadnet=roadnet,\n trafficflow=trafficflow\n )\n round_results = oneline.train(round=i)\n results_table.append([round_results['test_reward_over'], round_results['test_avg_queue_len_over'],\n round_results['test_avg_travel_time_over']])\n all_rewards.append(round_results['test_reward_over'])\n all_queue_len.append(round_results['test_avg_queue_len_over'])\n all_travel_time.append(round_results['test_avg_travel_time_over'])\n\n # delete junk\n cmd_delete_model = 'rm -rf <dir>'.replace(\"<dir>\", dic_path[\"PATH_TO_MODEL\"])\n cmd_delete_work = 'find <dir> -type f ! -name \"state_action.json\" -exec rm -rf {} \\;'.replace(\"<dir>\", dic_path[\"PATH_TO_WORK_DIRECTORY\"])\n os.system(cmd_delete_model)\n os.system(cmd_delete_work)\n\n results_table.append([np.average(all_rewards), np.average(all_queue_len), np.average(all_travel_time)])\n results_table.append([np.std(all_rewards), np.std(all_queue_len), np.std(all_travel_time)])\n\n table_logger = wandb.init(\n project=dic_traffic_env_conf['PROJECT_NAME'],\n group=f\"{dic_traffic_env_conf['MODEL_NAME']}-{roadnet}-{trafficflow}-{len(dic_agent_conf['FIXED_TIME'])}_Phases\",\n name=\"exp_results\",\n config=merge(merge(dic_agent_conf, dic_path), dic_traffic_env_conf),\n )\n columns = [\"reward\", \"avg_queue_len\", \"avg_travel_time\"]\n logger_table = wandb.Table(columns=columns, data=results_table)\n table_logger.log({\"results\": logger_table})\n wandb.finish()\n\n return"
},
{
"identifier": "error",
"path": "utils/error.py",
"snippet": "class flowFileException(Exception):\n def __init__(self, message):\n def __str__(self):"
}
] | from utils.utils import oneline_wrapper
from utils import error
from multiprocessing import Process
import os
import time
import argparse | 1,154 |
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--memo", type=str, default='AdvancedMaxPressure')
parser.add_argument("--model", type=str, default="AdvancedMaxPressure")
parser.add_argument("--proj_name", type=str, default="chatgpt-TSCS")
parser.add_argument("--eightphase", action="store_true", default=False)
parser.add_argument("--multi_process", action="store_true", default=True)
parser.add_argument("--workers", type=int, default=1)
parser.add_argument("--dataset", type=str, default="template")
parser.add_argument("--traffic_file", type=str, default="flow_main_stream.json")
return parser.parse_args()
def main(in_args):
traffic_file_list = []
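    # Map each benchmark dataset to its road-network size and candidate traffic-flow files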
if in_args.dataset == 'jinan':
count = 3600
road_net = "3_4"
traffic_file_list = ["anon_3_4_jinan_real.json", "anon_3_4_jinan_real_2000.json", "anon_3_4_jinan_real_2500.json"]
template = "Jinan"
elif in_args.dataset == 'hangzhou':
count = 3600
road_net = "4_4"
traffic_file_list = ["anon_4_4_hangzhou_real.json", "anon_4_4_hangzhou_real_5816.json"]
template = "Hangzhou"
elif in_args.dataset == 'newyork_16x3':
count = 3600
road_net = "16_3"
traffic_file_list = ["anon_16_3_newyork_real.json"]
template = "NewYork"
elif in_args.dataset == 'newyork_28x7':
count = 3600
road_net = "28_7"
traffic_file_list = ["anon_28_7_newyork_real_double.json", "anon_28_7_newyork_real_triple.json"]
template = "NewYork"
elif in_args.dataset == 'template':
count = 3600
road_net = "1_1"
traffic_file_list = ["flow_main_stream.json"]
template = "template"
    # Validate that the requested traffic flow file exists for the chosen dataset
try:
if in_args.traffic_file not in traffic_file_list:
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--memo", type=str, default='AdvancedMaxPressure')
parser.add_argument("--model", type=str, default="AdvancedMaxPressure")
parser.add_argument("--proj_name", type=str, default="chatgpt-TSCS")
parser.add_argument("--eightphase", action="store_true", default=False)
parser.add_argument("--multi_process", action="store_true", default=True)
parser.add_argument("--workers", type=int, default=1)
parser.add_argument("--dataset", type=str, default="template")
parser.add_argument("--traffic_file", type=str, default="flow_main_stream.json")
return parser.parse_args()
def main(in_args):
traffic_file_list = []
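    # Map each benchmark dataset to its road-network size and candidate traffic-flow files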
if in_args.dataset == 'jinan':
count = 3600
road_net = "3_4"
traffic_file_list = ["anon_3_4_jinan_real.json", "anon_3_4_jinan_real_2000.json", "anon_3_4_jinan_real_2500.json"]
template = "Jinan"
elif in_args.dataset == 'hangzhou':
count = 3600
road_net = "4_4"
traffic_file_list = ["anon_4_4_hangzhou_real.json", "anon_4_4_hangzhou_real_5816.json"]
template = "Hangzhou"
elif in_args.dataset == 'newyork_16x3':
count = 3600
road_net = "16_3"
traffic_file_list = ["anon_16_3_newyork_real.json"]
template = "NewYork"
elif in_args.dataset == 'newyork_28x7':
count = 3600
road_net = "28_7"
traffic_file_list = ["anon_28_7_newyork_real_double.json", "anon_28_7_newyork_real_triple.json"]
template = "NewYork"
elif in_args.dataset == 'template':
count = 3600
road_net = "1_1"
traffic_file_list = ["flow_main_stream.json"]
template = "template"
# flow_file error
try:
if in_args.traffic_file not in traffic_file_list: | raise error.flowFileException('Flow file does not exist.') | 1 | 2023-12-26 08:31:47+00:00 | 2k |
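For reference, the gold completion above raises the repo's custom flowFileException when the requested flow file is unknown. A minimal standalone sketch of that guard: the exception body is an assumption filled in from the utils/error.py stub in the context, and the inputs are illustrative.

class flowFileException(Exception):
    def __init__(self, message):
        self.message = message          # assumed body; the context shows only the stub

    def __str__(self):
        return self.message

traffic_file_list = ["flow_main_stream.json"]   # the 'template' dataset branch above
requested = "missing_flow.json"                 # illustrative bad input
try:
    if requested not in traffic_file_list:
        raise flowFileException('Flow file does not exist.')
except flowFileException as exc:
    print(f"aborting run: {exc}")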
ohadmata/shmessy | src/shmessy/types/unix_timestamp.py | [
{
"identifier": "InferredField",
"path": "src/shmessy/schema.py",
"snippet": "class InferredField(BaseModel):\n inferred_type: Optional[str] = None\n inferred_pattern: Optional[Any] = None"
},
{
"identifier": "ValidatorTypes",
"path": "src/shmessy/schema.py",
"snippet": "class ValidatorTypes(str, Enum):\n NUMERIC = \"NUMERIC\"\n STRING = \"STRING\""
},
{
"identifier": "BaseType",
"path": "src/shmessy/types/base.py",
"snippet": "class BaseType(ABC):\n weight: int = 0\n validator_types: Tuple[ValidatorTypes]\n\n @abstractmethod\n def validate(self, data: ndarray) -> Optional[InferredField]:\n pass\n\n @abstractmethod\n def fix(self, column: Series, inferred_field: InferredField) -> Series:\n pass\n\n def is_validator_type_valid(self, dtype: Type) -> bool:\n for possible_validator_type in self.validator_types:\n if self._check_single_validator_type(dtype, possible_validator_type):\n return True\n return False\n\n @staticmethod\n def _check_single_validator_type(\n dtype: Type, possible_validator_type: ValidatorTypes\n ) -> bool:\n if possible_validator_type == ValidatorTypes.NUMERIC and not issubdtype(\n dtype, number\n ):\n return False\n\n if possible_validator_type == ValidatorTypes.STRING and not (\n issubdtype(dtype, object_) or issubdtype(dtype, str_)\n ):\n return False\n return True\n\n @property\n def name(self) -> str:\n return str(self.__class__.__name__.replace(\"Type\", \"\"))"
}
] | import logging
import math
from datetime import datetime
from enum import Enum
from typing import Optional
from numpy import ndarray
from pandas import Series, to_datetime
from ..schema import InferredField, ValidatorTypes
from .base import BaseType | 669 |
logger = logging.getLogger(__name__)
class TimestampResolution(str, Enum):
SECONDS = "s"
MILLISECONDS = "ms"
NANOSECONDS = "ns"
class UnixTimestampType(BaseType):
weight = 4
validator_types = (ValidatorTypes.NUMERIC,)
min_valid_year: int = 1980
max_valid_year: int = 2100
@staticmethod
def _unix_timestamp_resolution(value: float) -> TimestampResolution:
number_of_digits = len(str(int(value)))
if number_of_digits == 10:
return TimestampResolution.SECONDS
if number_of_digits == 13:
return TimestampResolution.MILLISECONDS
if number_of_digits == 16:
return TimestampResolution.NANOSECONDS
@staticmethod
def _fix_input_resolution(
value: float, selected_resolution: TimestampResolution
) -> float:
if selected_resolution == TimestampResolution.SECONDS:
return value
if selected_resolution == TimestampResolution.MILLISECONDS:
return value / 1000
if selected_resolution == TimestampResolution.NANOSECONDS:
            return value / 1000 / 1000 | def validate(self, data: ndarray) -> Optional[InferredField]: | 0 | 2023-12-27 20:15:01+00:00 | 2k |
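A worked example of the digit-count heuristic above; the sample epochs are illustrative, and the scale map mirrors _fix_input_resolution:

from datetime import datetime, timezone

for ts in (1703712000, 1703712000000, 1703712000000000):
    digits = len(str(int(ts)))
    scale = {10: 1, 13: 1_000, 16: 1_000_000}[digits]   # mirrors _fix_input_resolution
    print(digits, "digits ->", datetime.fromtimestamp(ts / scale, tz=timezone.utc))
# Note: the 16-digit branch divides by 10**6 (a microsecond-scale epoch), despite the NANOSECONDS name.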
kokiez/solana-sniper | monitor_price_strategy.py | [
{
"identifier": "get_price",
"path": "birdeye.py",
"snippet": "def get_price(token_address):\r\n url = f\"https://api.dexscreener.com/latest/dex/tokens/{token_address}\"\r\n exclude = ['EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v', 'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB']\r\n response = requests.get(url).json()\r\n \r\n if token_address not in exclude:\r\n for pair in response['pairs']:\r\n if pair['quoteToken']['address'] == 'So11111111111111111111111111111111111111112':\r\n return float(pair['priceUsd'])\r\n else:\r\n return response['pairs'][0]['priceUsd']\r\n return None\r"
},
{
"identifier": "getSymbol",
"path": "birdeye.py",
"snippet": "def getSymbol(token):\r\n # usdc and usdt\r\n exclude = ['EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v', 'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB']\r\n \r\n if token not in exclude:\r\n url = f\"https://api.dexscreener.com/latest/dex/tokens/{token}\"\r\n\r\n Token_Symbol = \"\"\r\n Sol_symbol=\"\"\r\n try:\r\n response = requests.get(url)\r\n\r\n # Check if the request was successful (status code 200)\r\n if response.status_code == 200:\r\n resp = response.json()\r\n print(\"Response:\",resp['pairs'][0]['baseToken']['symbol'])\r\n for pair in resp['pairs']:\r\n quoteToken = pair['quoteToken']['symbol']\r\n\r\n if quoteToken == 'SOL':\r\n Token_Symbol = pair['baseToken']['symbol']\r\n Sol_symbol = quoteToken\r\n return Token_Symbol, Sol_symbol\r\n\r\n\r\n else:\r\n print(f\"[getSymbol] Request failed with status code {response.status_code}\")\r\n\r\n except requests.exceptions.RequestException as e:\r\n print(f\"[getSymbol] error occurred: {e}\")\r\n except: \r\n a = 1\r\n\r\n return Token_Symbol, Sol_symbol\r\n else:\r\n if token == 'EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v':\r\n return \"USDC\", \"SOL\"\r\n elif token == 'EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v':\r\n return \"USDT\", \"SOL\"\r"
},
{
"identifier": "sendWebhook",
"path": "webhook.py",
"snippet": "def sendWebhook(title_type_info, description):\r\n global error_webhook\r\n global webhook_url\r\n title = \"\"\r\n title_type = title_type_info.split(\"|\")\r\n if title_type[0] == \"msg\":\r\n title = title_type[1]\r\n color = colors[\"Green\"]\r\n webhook(title, color, description, webhook_url)\r\n \r\n elif title_type[0] == \"msg_b\":\r\n title = title_type[1]\r\n color = colors[\"DarkAqua\"]\r\n webhook(title, color, description, webhook_url)\r\n\r\n elif title_type[0] == \"msg_s\":\r\n title = title_type[1]\r\n color = colors[\"DarkAqua\"]\r\n webhook(title, color, description, webhook_url)\r\n\r\n elif title_type[0] == \"i_s\": #invest or slippage was changed etc\r\n title = title_type[1]\r\n color = colors[\"DarkPurple\"]\r\n webhook(title, color, description, webhook_url)\r\n \r\n elif title_type[0] == \"e\": #error\r\n title = title_type[1]\r\n color = colors[\"DarkRed\"]\r\n webhook(title, color, description, error_webhook)\r\n\r\n elif title_type[0] == \"a\": #alert\r\n title = title_type[1]\r\n color = colors[\"LuminousVividPink\"]\r\n webhook(title, color, description, webhook_url)\r\n\r\n elif title_type[0] == \"w\": #wallet info\r\n title = title_type[1]\r\n color = colors[\"Gold\"]\r\n webhook(title, color, description, webhook_url)\r"
}
] | import time
from birdeye import get_price, getSymbol
from webhook import sendWebhook
| 1,376 |
"""If you have ton of trades then best to use Simulate Transaction and modify this part of code to your needs"""
"""
Only Take Profit
"""
def limit_order(bought_token_price,desired_token_address, take_profit_ratio, execution_time, txB):
token_symbol, SOl_Symbol = getSymbol(desired_token_address)
# CALCULATE SELL LIMIT
sell_limit_token_price = bought_token_price * take_profit_ratio
print("-" * 79)
print(f"| {'Bought Price':<12} | {'Sell Limit':<12} | {'Tx Buy':<50} |")
print("-" * 79)
print(f"|{bought_token_price:.12f} | {sell_limit_token_price:.12f} {txB:<50} |")
print("-" * 79)
sendWebhook(f"msg_b|BUY INFO {token_symbol}",f"Bought Price: {bought_token_price:.12f}\n**Sell Limit: {sell_limit_token_price:.15f}**\nTotal Buy Execution time: {execution_time} seconds\nBuy TXN: https://solscan.io/tx/{txB} |")
# LOOP = CHECK IF PRICE >= SELL LIMIT | checks price every 5 seconds
priceLow = True
# while priceLow and isTimePassed(time_limit) == False:
while priceLow:
        # Check if time limit has been passed for the token bought or not | bought_token_curr_price = get_price(desired_token_address) | 0 | 2023-12-26 11:40:05+00:00 | 2k |
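The sell limit above is plain multiplication; a worked example with illustrative numbers:

bought_token_price = 0.000012345
take_profit_ratio = 1.5
sell_limit_token_price = bought_token_price * take_profit_ratio
print(f"{sell_limit_token_price:.12f}")   # 0.000018517500 -> sell once the price reaches this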
enochyearn/MLX_RoBERTa | mlx_roberta.py | [
{
"identifier": "LayerNormBasselCorrected",
"path": "custom/nn/layers/normalization.py",
"snippet": "class LayerNormBasselCorrected(Module):\n r\"\"\"Applies layer normalization [1] on the inputs with Bessel's Correction used by default like PyTorch.\n\n Computes\n\n .. math::\n\n y = \\frac{x - E[x]}{\\sqrt{Var[x]} + \\epsilon} \\gamma + \\beta,\n\n where :math:`\\gamma` and :math:`\\beta` are learned per feature dimension\n parameters initialized at 1 and 0 respectively.\n\n Var[x] would by default apply Bessel's Correction.\n\n [1]: https://arxiv.org/abs/1607.06450\n\n Args:\n dims (int): The feature dimension of the input to normalize over\n eps (float): A small additive constant for numerical stability\n affine (bool): If True learn an affine transform to apply after the\n normalization\n correction (bool): \n \"\"\"\n\n def __init__(self, dims: int, eps: float = 1e-5, affine: bool = True, correction: bool = True):\n super().__init__()\n if affine:\n self.bias = mx.zeros((dims,))\n self.weight = mx.ones((dims,))\n self.eps = eps\n self.dims = dims\n self.correction = correction\n\n def _extra_repr(self):\n return f\"{self.dims}, eps={self.eps}, affine={'weight' in self}\"\n\n def __call__(self, x):\n means = mx.mean(x, axis=-1, keepdims=True)\n var = mx.var(x, axis=-1, keepdims=True, ddof=int(self.correction))\n x = (x - means) * mx.rsqrt(var + self.eps)\n return (self.weight * x + self.bias) if \"weight\" in self else x"
},
{
"identifier": "LayerNormTorchAlike",
"path": "custom/nn/layers/normalization.py",
"snippet": "class LayerNormTorchAlike(Module):\n r\"\"\"Applies layer normalization [1] on the inputs in PyTorch's style.\n MLX's official LayerNorm has a different behavior with PyTorch's.\n\n Computes\n\n .. math::\n\n y = \\frac{x - E[x]}{\\sqrt{Var[x]} + \\epsilon} \\gamma + \\beta,\n\n where :math:`\\gamma` and :math:`\\beta` are learned per feature dimension\n parameters initialized at 1 and 0 respectively.\n\n Var[x] would by default apply Bessel's Correction.\n\n [1]: https://arxiv.org/abs/1607.06450\n\n Args:\n dims (int): The feature dimension of the input to normalize over\n eps (float): A small additive constant for numerical stability\n affine (bool): If True learn an affine transform to apply after the\n normalization\n correction (bool): \n \"\"\"\n\n def __init__(self, dims: int, eps: float = 1e-5, affine: bool = True, correction: bool = True):\n super().__init__()\n if affine:\n self.bias = mx.zeros((dims,))\n self.weight = mx.ones((dims,))\n self.eps = eps\n self.dims = dims\n self.correction = correction\n\n def _extra_repr(self):\n return f\"{self.dims}, eps={self.eps}, affine={'weight' in self}\"\n\n def __call__(self, x):\n # Calculate the mean of all elements;\n # i.e. the means for each element $\\mathbb{E}[X]$\n mean = x.mean(axis=-1, keepdims=True)\n # Calculate the squared mean of all elements;\n # i.e. the means for each element $\\mathbb{E}[X^2]$\n mean_x2 = (x ** 2).mean(axis=-1, keepdims=True)\n # Variance of all element $Var[X] = \\mathbb{E}[X^2] - \\mathbb{E}[X]^2$\n var = mean_x2 - mean ** 2\n\n # Normalize $$\\hat{X} = \\frac{X - \\mathbb{E}[X]}{\\sqrt{Var[X] + \\epsilon}}$$\n x_norm = (x - mean) / mx.sqrt(var + self.eps)\n # Scale and shift $$\\text{LN}(x) = \\gamma \\hat{X} + \\beta$$ \n x_norm = self.weight * x_norm + self.bias\n return x_norm"
}
] | import argparse
import time
import mlx.core as mx
import mlx.nn as nn
import numpy as np
import math
from mlx.utils import tree_unflatten
from collections import OrderedDict
from custom.nn.layers.normalization import LayerNormBasselCorrected, LayerNormTorchAlike
from transformers import RobertaTokenizer
from dataclasses import dataclass | 1,439 |
# utils
@dataclass
class ModelConfig:
intermediate_size: int = 3072
hidden_size: int = 768
no_heads: int = 12
hidden_layers: int = 12
vocab_size: int = 50265
attention_probs_dropout_prob: float = 0.1
hidden_dropout_prob: float = 0.1
layer_norm_eps: float = 1e-5
max_position_embeddings: int = 514
# QA model's parameters
num_labels: int = 2
type_vocab_size: int = 2
pad_token_id: int = 1
chunk_size_feed_forward: int = 0
model_configs = {
"deepset/roberta-base-squad2": ModelConfig(),
"roberta-base": ModelConfig(),
}
model_types = {
"deepset/roberta-base-squad2": "qa",
"roberta-base": "base",
}
class RobertaEmbeddings(nn.Module):
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) | self.LayerNorm = LayerNormTorchAlike(config.hidden_size, eps=config.layer_norm_eps, correction=True) | 1 | 2023-12-22 05:48:57+00:00 | 2k |
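The behavioral knob separating the custom LayerNorm classes in the context is Bessel's correction on the variance (ddof=int(correction) in LayerNormBasselCorrected). A NumPy sketch with illustrative values:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
for ddof in (0, 1):                      # 1 = Bessel-corrected, matching PyTorch's behavior
    var = x.var(ddof=ddof)               # 1.25 vs ~1.6667 for this x
    y = (x - x.mean()) / np.sqrt(var + 1e-5)
    print(ddof, round(float(var), 4), y.round(3))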
zy7y/dfs-generate | main.py | [
{
"identifier": "CodeGen",
"path": "entity.py",
"snippet": "class CodeGen(BaseVo):\n name: str\n code: str\n\n @field_serializer(\"code\")\n def serialize_code(self, code: str, _info):\n _code = black.format_str(code, mode=black.FileMode())\n return isort.code(_code)"
},
{
"identifier": "Conf",
"path": "entity.py",
"snippet": "class Conf(SQLModel, table=True):\n __tablename__ = \"dfs_conf\"\n id: int = Field(None, primary_key=True)\n db_uri: str = Field(..., description=\"数据库连接\")\n\n @classmethod\n def get_db_uri_last_new(cls):\n \"\"\"获取最新的db_url\"\"\"\n with Session(engine) as session:\n query = select(cls).order_by(cls.id.desc())\n latest_conf = session.exec(query).first()\n if latest_conf:\n return latest_conf.db_uri\n else:\n return None\n\n @classmethod\n def create(cls, uri) -> \"Conf\":\n with Session(engine) as session:\n obj = cls(db_uri=uri)\n session.add(obj)\n session.commit()\n session.refresh(obj)\n return obj\n\n @classmethod\n def get_last_uri_with_metadata(cls):\n uri = cls.get_db_uri_last_new()\n return uri, get_metadata_by_db_uri(uri)"
},
{
"identifier": "DBConf",
"path": "entity.py",
"snippet": "class DBConf(SQLModel):\n user: str\n password: str\n port: int\n host: str\n db: str\n\n def get_db_uri(self):\n return f\"mysql+pymysql://{self.user}:{self.password}@{self.host}:{self.port}/{self.db}\"\n\n def get_metadata(self):\n return get_metadata_by_db_uri(self.get_db_uri())"
},
{
"identifier": "R",
"path": "entity.py",
"snippet": "class R(BaseModel, Generic[T]):\n code: int = 20000\n msg: str = \"ok\"\n data: Optional[T] = None\n\n @classmethod\n def success(cls, **kwargs):\n return cls(**kwargs)\n\n @classmethod\n def error(cls, msg):\n return cls(code=40000, msg=msg)"
},
{
"identifier": "RList",
"path": "entity.py",
"snippet": "class RList(R[T]):\n data: List[T] = Field(default_factory=list)"
},
{
"identifier": "Table",
"path": "entity.py",
"snippet": "class Table(BaseVo):\n table_name: str\n table_comment: Optional[str] = None"
},
{
"identifier": "generate_code",
"path": "generate/main.py",
"snippet": "def generate_code(table: Table, uri: str):\n return [\n {\"name\": \"model.py\", \"code\": GenerateEntity(table).render()},\n {\"name\": \"router.py\", \"code\": render_router(table.name)},\n {\"name\": \"main.py\", \"code\": render_main(table.name)},\n {\"name\": \"db.py\", \"code\": render_db(uri)},\n ]"
}
] | from fastapi import FastAPI, Query
from fastapi.requests import Request
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles
from entity import CodeGen, Conf, DBConf, R, RList, Table
from generate.main import generate_code
import uvicorn | 789 |
app = FastAPI(
title="dfs-generate", description="FastAPI SQLModel 逆向生成代码", docs_url=None
)
app.mount("/static", StaticFiles(directory="static"), name="static")
@app.get("/", include_in_schema=False)
def index():
    return FileResponse("static/index.html") | @app.get("/tables", response_model=RList[Table]) | 5 | 2023-12-23 08:32:58+00:00 | 2k |
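Illustrative use of the R/RList envelopes from the context snippets; this sketch assumes pydantic v2 (implied by model_dump_json in the snippets) and that the models are importable. It is not repo code:

ok = RList[Table](data=[Table(table_name="dfs_conf", table_comment="tool config")])
err = R.error("database unreachable")    # -> code=40000, data=None
print(ok.model_dump(), err.model_dump())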
CrawlScript/Torch-MGDCF | torch_mgdcf/evaluation/ranking.py | [
{
"identifier": "ndcg_score",
"path": "torch_mgdcf/metrics/ranking.py",
"snippet": "def ndcg_score(reference, hypothesis):\n \"\"\"\n Normalized Discounted Cumulative Gain (nDCG)\n Normalized version of DCG:\n nDCG = DCG(hypothesis)/DCG(reference)\n\n Parameters:\n reference - a gold standard (perfect) ordering Ex: [5,4,3,2,1]\n hypothesis - a proposed ordering Ex: [5,2,2,3,1]\n\n Returns:\n ndcg_score - normalized score\n \"\"\"\n\n return dcg_score(hypothesis)/dcg_score(reference)"
},
{
"identifier": "precision_score",
"path": "torch_mgdcf/metrics/ranking.py",
"snippet": "def precision_score(reference, hypothesis):\n result = np.sum(hypothesis, dtype=np.float32)/len(hypothesis)\n return result"
},
{
"identifier": "recall_score",
"path": "torch_mgdcf/metrics/ranking.py",
"snippet": "def recall_score(reference, hypothesis):\n result = np.sum(hypothesis, dtype=np.float32) / len(reference)\n return result"
},
{
"identifier": "VectorSearchEngine",
"path": "torch_mgdcf/vector_search/vector_search.py",
"snippet": "class VectorSearchEngine(object):\n def __init__(self, vectors):\n super().__init__()\n if isinstance(vectors, torch.Tensor):\n self.vectors = vectors.detach().cpu().numpy()\n else:\n self.vectors = np.array(vectors)\n self.dim = self.vectors.shape[1]\n\n self.index = faiss.IndexFlatIP(self.dim)\n self.index.add(self.vectors)\n\n def search(self, query_vectors, k=10):\n query_vectors = np.asarray(query_vectors)\n topK_distances, topK_indices = self.index.search(query_vectors, k)\n\n return topK_distances, topK_indices"
}
] | from tqdm import tqdm
from torch_mgdcf.metrics.ranking import ndcg_score, precision_score, recall_score
from torch_mgdcf.vector_search.vector_search import VectorSearchEngine
import numpy as np
import torch | 765 | # coding=utf-8
# The code is from our other project GRecX: https://github.com/maenzhier/grecx_datasets
def score(ground_truth, pred_items, k_list, metrics):
pred_match = [1 if item in ground_truth else 0 for item in pred_items]
max_k = k_list[-1]
if len(ground_truth) > max_k:
ndcg_gold = [1] * max_k
else:
ndcg_gold = [1] * len(ground_truth) + [0] * (max_k - len(ground_truth))
res_score = []
for metric in metrics:
if metric == "ndcg":
score_func = ndcg_score
elif metric == "precision":
score_func = precision_score
elif metric == "recall":
score_func = recall_score
else:
raise Exception("Not Found Metric : {}".format(metric))
for k in k_list:
if metric == "ndcg":
res_score.append(score_func(ndcg_gold[:k], pred_match[:k]))
else:
res_score.append(score_func(ground_truth, pred_match[:k]))
return res_score
def evaluate_mean_global_metrics(user_items_dict, user_mask_items_dict,
user_embedding, item_embedding,
                                 k_list=[10, 20], metrics=["ndcg"]): | v_search = VectorSearchEngine(item_embedding) | 3 | 2023-12-26 10:26:50+00:00 | 2k |
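How score() above assembles its inputs, with illustrative items; dcg_score itself lives in torch_mgdcf.metrics.ranking:

ground_truth = {3, 7}                    # relevant items for one user
pred_items = [7, 1, 3]                   # a top-3 ranking
pred_match = [1 if i in ground_truth else 0 for i in pred_items]     # [1, 0, 1]
ndcg_gold = [1] * len(ground_truth) + [0] * (3 - len(ground_truth))  # [1, 1, 0]
# ndcg_score(ndcg_gold[:k], pred_match[:k]) then normalizes DCG(pred) by DCG(gold).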
KyanChen/TTP | opencd/models/data_preprocessor.py | [
{
"identifier": "SampleList",
"path": "mmseg/utils/typing_utils.py",
"snippet": ""
},
{
"identifier": "MODELS",
"path": "opencd/registry.py",
"snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['opencd.models'])"
}
] | from numbers import Number
from typing import Any, Dict, List, Optional, Sequence, Union
from mmengine.model import BaseDataPreprocessor
from mmseg.utils import SampleList
from opencd.registry import MODELS
import numpy as np
import torch
import torch.nn.functional as F | 1,234 | # Copyright (c) Open-CD. All rights reserved.
def stack_batch(inputs: List[torch.Tensor],
data_samples: Optional[SampleList] = None,
size: Optional[tuple] = None,
size_divisor: Optional[int] = None,
pad_val: Union[int, float] = 0,
seg_pad_val: Union[int, float] = 255) -> torch.Tensor:
"""Stack multiple inputs to form a batch and pad the images and gt_sem_segs
to the max shape use the right bottom padding mode.
Args:
inputs (List[Tensor]): The input multiple tensors. each is a
CHW 3D-tensor.
data_samples (list[:obj:`SegDataSample`]): The list of data samples.
It usually includes information such as `gt_sem_seg`.
size (tuple, optional): Fixed padding size.
size_divisor (int, optional): The divisor of padded size.
pad_val (int, float): The padding value. Defaults to 0
seg_pad_val (int, float): The padding value. Defaults to 255
Returns:
Tensor: The 4D-tensor.
List[:obj:`SegDataSample`]: After the padding of the gt_seg_map.
"""
assert isinstance(inputs, list), \
f'Expected input type to be list, but got {type(inputs)}'
assert len({tensor.ndim for tensor in inputs}) == 1, \
f'Expected the dimensions of all inputs must be the same, ' \
f'but got {[tensor.ndim for tensor in inputs]}'
assert inputs[0].ndim == 3, f'Expected tensor dimension to be 3, ' \
f'but got {inputs[0].ndim}'
assert len({tensor.shape[0] for tensor in inputs}) == 1, \
f'Expected the channels of all inputs must be the same, ' \
f'but got {[tensor.shape[0] for tensor in inputs]}'
# only one of size and size_divisor should be valid
assert (size is not None) ^ (size_divisor is not None), \
'only one of size and size_divisor should be valid'
padded_inputs = []
padded_samples = []
inputs_sizes = [(img.shape[-2], img.shape[-1]) for img in inputs]
max_size = np.stack(inputs_sizes).max(0)
if size_divisor is not None and size_divisor > 1:
# the last two dims are H,W, both subject to divisibility requirement
max_size = (max_size +
(size_divisor - 1)) // size_divisor * size_divisor
for i in range(len(inputs)):
tensor = inputs[i]
if size is not None:
width = max(size[-1] - tensor.shape[-1], 0)
height = max(size[-2] - tensor.shape[-2], 0)
# (padding_left, padding_right, padding_top, padding_bottom)
padding_size = (0, width, 0, height)
elif size_divisor is not None:
width = max(max_size[-1] - tensor.shape[-1], 0)
height = max(max_size[-2] - tensor.shape[-2], 0)
padding_size = (0, width, 0, height)
else:
padding_size = [0, 0, 0, 0]
# pad img
pad_img = F.pad(tensor, padding_size, value=pad_val)
padded_inputs.append(pad_img)
# pad gt_sem_seg
if data_samples is not None:
data_sample = data_samples[i]
gt_sem_seg = data_sample.gt_sem_seg.data
del data_sample.gt_sem_seg.data
data_sample.gt_sem_seg.data = F.pad(
gt_sem_seg, padding_size, value=seg_pad_val)
if 'gt_edge_map' in data_sample:
gt_edge_map = data_sample.gt_edge_map.data
del data_sample.gt_edge_map.data
data_sample.gt_edge_map.data = F.pad(
gt_edge_map, padding_size, value=seg_pad_val)
if 'gt_seg_map_from' in data_sample:
gt_seg_map_from = data_sample.gt_seg_map_from.data
del data_sample.gt_seg_map_from.data
data_sample.gt_seg_map_from.data = F.pad(
gt_seg_map_from, padding_size, value=seg_pad_val)
if 'gt_seg_map_to' in data_sample:
gt_seg_map_to = data_sample.gt_seg_map_to.data
del data_sample.gt_seg_map_to.data
data_sample.gt_seg_map_to.data = F.pad(
gt_seg_map_to, padding_size, value=seg_pad_val)
data_sample.set_metainfo({
'img_shape': tensor.shape[-2:],
'pad_shape': data_sample.gt_sem_seg.shape,
'padding_size': padding_size
})
padded_samples.append(data_sample)
else:
padded_samples.append(
dict(
img_padding_size=padding_size,
pad_shape=pad_img.shape[-2:]))
    return torch.stack(padded_inputs, dim=0), padded_samples | @MODELS.register_module() | 1 | 2023-12-23 08:36:47+00:00 | 2k |
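stack_batch above always pads on the right and bottom (padding_size is (left=0, width, top=0, height)), so image content stays anchored at the top-left. A minimal standalone check of that rule:

import torch
import torch.nn.functional as F

imgs = [torch.ones(3, 2, 2), torch.ones(3, 4, 3)]      # CHW tensors of different sizes
max_h = max(t.shape[-2] for t in imgs)                 # 4
max_w = max(t.shape[-1] for t in imgs)                 # 3
batch = torch.stack([
    F.pad(t, (0, max_w - t.shape[-1], 0, max_h - t.shape[-2]), value=0)
    for t in imgs
])
print(batch.shape)                                     # torch.Size([2, 3, 4, 3])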
N0rz3/Phunter | lib/lookup.py | [
{
"identifier": "free",
"path": "lib/free_lookup.py",
"snippet": "async def free(phone_number):\r\n r = await Request(\"https://free-lookup.net/{}\".format(phone_number), headers={'user-agent': random.choice(agent)}).get()\r\n\r\n html_body = BeautifulSoup(r.text, \"html.parser\")\r\n list_info = html_body.findChild(\"ul\", class_=\"report-summary__list\").findAll(\"div\")\r\n\r\n info_dict = {\r\n k.text.strip(): info.text.strip() if info.text.strip() else \"Not found\"\r\n for _, (k, info) in enumerate(zip(list_info[::2], list_info[1::2]))\r\n }\r\n\r\n print(f\"\\n [{GREEN}>{WHITE}] Free-lookup\")\r\n\r\n for key, value in info_dict.items():\r\n if value != \"Not found\":\r\n print(f\" ├── {key}: {value}\")\r\n\r\n else:\r\n continue"
},
{
"identifier": "spamcalls",
"path": "lib/spam.py",
"snippet": "async def spamcalls(p_n):\r\n print(f\"\\n [{GREEN}>{WHITE}] Spamcalls\")\r\n\r\n url = f\"https://spamcalls.net/en/number/{p_n}\"\r\n\r\n r = await Request(url, headers={'user-agent': random.choice(user_agent)}).get()\r\n\r\n if r.status_code == 200:\r\n print(f\" └── {RED}!{WHITE} Spammer\")\r\n\r\n else:\r\n print(f\" └── {GREEN}>{WHITE} Not spammer\")"
}
] | import phonenumbers
import json
from phonenumbers import carrier
from .reputation import *
from .free_lookup import free
from .spam import spamcalls
from lib.text import *
| 809 |
async def lookup(phone_number):
print()
parsed = phonenumbers.parse(phone_number)
operator = carrier.name_for_number(parsed, "fr")
line = phonenumbers.number_type(parsed)
if line == phonenumbers.PhoneNumberType.FIXED_LINE:
ligne = f" [{GREEN}>{WHITE}] Line type: Fixed"
elif line == phonenumbers.PhoneNumberType.MOBILE:
ligne = f" [{GREEN}>{WHITE}] Line type: Mobile"
else:
ligne = " [-] Line not found"
possible = phonenumbers.is_possible_number(parsed)
valid = phonenumbers.is_valid_number(parsed)
with open("lib/country.json", "r") as file:
read = json.load(file)
d = 0
countrys = []
for country, code in read.items():
d += 1
if phone_number.startswith(code):
countrys.append(country)
if d == 153:
break
else:
continue
else:
continue
print(f"{WHITE}📞 Phone number: {BLUE}{phone_number}{WHITE}")
if possible == True:
pos = {"possible": "✔️"}
else:
pos = {"possible": "❌"}
if valid == True:
val = {"valid": "✔️"}
else:
val = {"valid": "❌"}
print(f" [{GREEN}>{WHITE}] Possible: {pos['possible']}")
print(f" [{GREEN}>{WHITE}] Valid: {val['valid']}")
print()
if operator != "":
print(f" [{GREEN}>{WHITE}] Operator: {operator}")
else:
print(f" [-] Not Operator")
try:
print(f" [{GREEN}>{WHITE}] Possible location: " + str(countrys).replace("[", "").replace("]", "").replace("'", ""))
except:
print(f" [-] Not location")
print(ligne)
    await reputation(phone_number) | await free(str(phone_number).replace("+", "")) | 0 | 2023-12-30 13:21:14+00:00 | 2k |
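The lookup above leans on the phonenumbers package; the same calls in isolation, on an illustrative number:

import phonenumbers
from phonenumbers import carrier

parsed = phonenumbers.parse("+14155552671")
print(phonenumbers.is_possible_number(parsed), phonenumbers.is_valid_number(parsed))
print(phonenumbers.number_type(parsed) == phonenumbers.PhoneNumberType.MOBILE)
print(carrier.name_for_number(parsed, "en") or "<no carrier data>")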
dan-r/HomeAssistant-Ohme | custom_components/ohme/binary_sensor.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/ohme/const.py",
"snippet": "DOMAIN = \"ohme\""
},
{
"identifier": "DATA_COORDINATORS",
"path": "custom_components/ohme/const.py",
"snippet": "DATA_COORDINATORS = \"coordinators\""
},
{
"identifier": "COORDINATOR_CHARGESESSIONS",
"path": "custom_components/ohme/const.py",
"snippet": "COORDINATOR_CHARGESESSIONS = 0"
},
{
"identifier": "COORDINATOR_ADVANCED",
"path": "custom_components/ohme/const.py",
"snippet": "COORDINATOR_ADVANCED = 3"
},
{
"identifier": "DATA_CLIENT",
"path": "custom_components/ohme/const.py",
"snippet": "DATA_CLIENT = \"client\""
},
{
"identifier": "OhmeChargeSessionsCoordinator",
"path": "custom_components/ohme/coordinator.py",
"snippet": "class OhmeChargeSessionsCoordinator(DataUpdateCoordinator):\n \"\"\"Coordinator to pull main charge state and power/current draw.\"\"\"\n\n def __init__(self, hass):\n \"\"\"Initialise coordinator.\"\"\"\n super().__init__(\n hass,\n _LOGGER,\n name=\"Ohme Charge Sessions\",\n update_interval=timedelta(seconds=30),\n )\n self._client = hass.data[DOMAIN][DATA_CLIENT]\n\n async def _async_update_data(self):\n \"\"\"Fetch data from API endpoint.\"\"\"\n try:\n return await self._client.async_get_charge_sessions()\n\n except BaseException:\n raise UpdateFailed(\"Error communicating with API\")"
},
{
"identifier": "OhmeAdvancedSettingsCoordinator",
"path": "custom_components/ohme/coordinator.py",
"snippet": "class OhmeAdvancedSettingsCoordinator(DataUpdateCoordinator):\n \"\"\"Coordinator to pull CT clamp reading.\"\"\"\n\n def __init__(self, hass):\n \"\"\"Initialise coordinator.\"\"\"\n super().__init__(\n hass,\n _LOGGER,\n name=\"Ohme Advanced Settings\",\n update_interval=timedelta(minutes=1),\n )\n self._client = hass.data[DOMAIN][DATA_CLIENT]\n\n async def _async_update_data(self):\n \"\"\"Fetch data from API endpoint.\"\"\"\n try:\n return await self._client.async_get_advanced_settings()\n\n except BaseException:\n raise UpdateFailed(\"Error communicating with API\")"
},
{
"identifier": "charge_graph_in_slot",
"path": "custom_components/ohme/utils.py",
"snippet": "def charge_graph_in_slot(charge_start, points, skip_format=False):\n \"\"\"Are we currently in a charge slot?\"\"\"\n now = int(time())\n data = points if skip_format else _format_charge_graph(charge_start, points)\n\n # Loop through every value, skipping the last\n for idx in range(0, len(data) - 1):\n # This is our current point\n if data[idx][\"t\"] < now and data[idx + 1][\"t\"] > now:\n # If the delta line we are on is steeper than 10,\n # we are in a charge slot.\n if data[idx + 1][\"y\"] - data[idx][\"y\"] > 10:\n return True\n break\n\n return False"
}
] | import logging
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity
)
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.util.dt import (utcnow)
from .const import DOMAIN, DATA_COORDINATORS, COORDINATOR_CHARGESESSIONS, COORDINATOR_ADVANCED, DATA_CLIENT
from .coordinator import OhmeChargeSessionsCoordinator, OhmeAdvancedSettingsCoordinator
from .utils import charge_graph_in_slot | 823 | """Platform for sensor integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: core.HomeAssistant,
config_entry: config_entries.ConfigEntry,
async_add_entities,
):
"""Setup sensors and configure coordinator."""
    client = hass.data[DOMAIN][DATA_CLIENT] | coordinator = hass.data[DOMAIN][DATA_COORDINATORS][COORDINATOR_CHARGESESSIONS] | 1 | 2023-12-24 20:59:18+00:00 | 2k |
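The charge_graph_in_slot helper in the context decides "in a charge slot" from the slope between the two graph points straddling the current time. A sketch of calling it directly: it assumes the integration's utils module is importable, and passing None for charge_start relies on that argument being unused when skip_format=True:

from time import time

now = int(time())
points = [{"t": now - 10, "y": 0}, {"t": now + 10, "y": 500}]   # delta 500 > 10
print(charge_graph_in_slot(None, points, skip_format=True))     # True: inside a slot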
Almas-Ali/SpyIP | spyip/backend.py | [
{
"identifier": "TooManyRequests",
"path": "spyip/exceptions.py",
"snippet": "class TooManyRequests(Exception):\n pass"
},
{
"identifier": "ConnectionTimeout",
"path": "spyip/exceptions.py",
"snippet": "class ConnectionTimeout(Exception):\n pass"
},
{
"identifier": "StatusError",
"path": "spyip/exceptions.py",
"snippet": "class StatusError(Exception):\n pass"
},
{
"identifier": "IPResponse",
"path": "spyip/models.py",
"snippet": "class IPResponse(BaseModel):\n \"\"\"\n Example response from API:\n\n {\n \"status\": \"success\",\n \"continent\": \"Asia\",\n \"continentCode\": \"AS\",\n \"country\": \"India\",\n \"countryCode\": \"IN\",\n \"region\": \"DL\",\n \"regionName\": \"National Capital Territory of Delhi\",\n \"city\": \"New Delhi\",\n \"district\": \"\",\n \"zip\": \"110001\",\n \"lat\": 28.6139,\n \"lon\": 77.209,\n \"timezone\": \"Asia/Kolkata\",\n \"offset\": 19800,\n \"currency\": \"INR\",\n \"isp\": \"Google LLC\",\n \"org\": \"Google LLC\",\n \"as\": \"AS15169 Google LLC\",\n \"asname\": \"GOOGLE\",\n \"mobile\": false,\n \"proxy\": false,\n \"hosting\": true,\n \"query\": \"142.250.193.206\",\n }\n \"\"\"\n\n status: str = Field(..., description='Status of the request.')\n continent: str = Field(..., description='Continent name.')\n continentCode: str = Field(..., description='Continent code.')\n country: str = Field(..., description='Country name.')\n countryCode: str = Field(..., description='Country code.')\n region: str = Field(..., description='Region code.')\n regionName: str = Field(..., description='Region name.')\n city: str = Field(..., description='City name.')\n district: str = Field(..., description='District name.')\n zip_: str = Field(..., description='Zip code.')\n lat: float = Field(..., description='Latitude.')\n lon: float = Field(..., description='Longitude.')\n timezone: str = Field(..., description='Timezone.')\n offset: int = Field(..., description='Offset.')\n currency: str = Field(..., description='Currency.')\n isp: str = Field(..., description='ISP name.')\n org: str = Field(..., description='Organization name.')\n as_: str = Field(..., description='AS number and name.')\n asname: str = Field(..., description='AS name.')\n mobile: bool = Field(..., description='Mobile status.')\n proxy: bool = Field(..., description='Proxy status.')\n hosting: bool = Field(..., description='Hosting status.')\n query: str = Field(..., description='IP address.')\n\n class Config:\n def alias_generator(x):\n return x.replace('_', '')\n\n populate_by_name = True\n # fields = { # Alias for reserved keywords\n # \"as_\": \"as\",\n # \"zip_\": \"zip\",\n # }\n\n @field_validator('status')\n def check_status(cls, v):\n if v != 'success':\n raise ValueError('Invalid IP address.')\n return v\n\n def json(self, **kwargs) -> str:\n return self.model_dump_json(**kwargs)"
},
{
"identifier": "DNSResponse",
"path": "spyip/models.py",
"snippet": "class DNSResponse(BaseModel):\n \"\"\"\n Example response from API:\n \"dns\": {\n \"ip\": \"74.125.73.83\",\n \"geo\": \"United States - Google\"\n }\n \"\"\"\n\n ip: str = Field(..., description='IP address.')\n geo: str = Field(..., description='Geo location.')\n\n def json(self, **kwargs) -> str:\n return self.model_dump_json(**kwargs)"
}
] | from typing import List, Union
from .exceptions import (
TooManyRequests,
ConnectionTimeout,
StatusError,
)
from .models import (
IPResponse,
DNSResponse,
)
import asyncio
import random
import string
import httpx | 1,207 |
def get_random_string(length: int = 32) -> str:
"""Generate a random string of fixed length."""
letters = string.ascii_lowercase + string.digits
return ''.join(random.sample(letters, length))
# API endpoints for IP address lookup
trace_me_url = 'http://ip-api.com/json/'
trace_ip_url = 'http://ip-api.com/json/%(query)s'
trace_dns_url = f'http://{get_random_string(32)}.edns.ip-api.com/json/'
trace_ip_batch_url = 'http://ip-api.com/batch'
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.5',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
}
def trace_me(
timeout: int = 5,
    lang: str = 'en', | ) -> Union[IPResponse, None]: | 3 | 2023-12-31 19:43:38+00:00 | 2k |
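One likely reason trace_dns_url embeds get_random_string(32): a throwaway subdomain makes every edns lookup unique, so cached DNS answers cannot be reused. This rationale is a reading of the code, not stated by the repo:

import random
import string

def get_random_string(length: int = 32) -> str:
    letters = string.ascii_lowercase + string.digits
    return ''.join(random.sample(letters, length))

print(f"http://{get_random_string(32)}.edns.ip-api.com/json/")  # fresh hostname each call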
leopedroso45/Stable-Diffusion-ImageGen | tests/test_process_task.py | [
{
"identifier": "check_cuda_and_clear_cache",
"path": "sevsd/process_task.py",
"snippet": "def check_cuda_and_clear_cache():\n r\"\"\"\n Clears the CUDA cache if available, otherwise performs garbage collection.\n This function is called to manage memory usage, particularly when working with large models or multiple image generations.\n \"\"\"\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n else:\n gc.collect()"
},
{
"identifier": "process_task",
"path": "sevsd/process_task.py",
"snippet": "def process_task(job, pipeline, executor, path, parallel_exec=True):\n r\"\"\"\n Processes a single image generation job using the specified pipeline and execution parameters.\n\n This function handles the generation of one or more images based on a given job description. It supports both parallel and sequential execution modes. Generated images are saved to the specified path.\n\n Parameters:\n job (dict): A dictionary containing details for the image generation task. It includes 'prompt' and optionally 'negative_prompt'.\n pipeline (callable): The Stable Diffusion pipeline callable used for generating images.\n executor (dict): A dictionary containing execution parameters such as 'num_of_exec', 'cfg_scale', and 'inference_steps'.\n path (str): The directory path where generated images will be saved.\n parallel_exec (bool, optional): If True, generates all specified images in parallel. Defaults to True.\n\n The function saves each generated image with a unique timestamp in the specified path and prints the save location. In case of any exceptions, they are caught and printed.\n\n Example:\n job = {\n \"prompt\": \"A scenic landscape\",\n \"negative_prompt\": \"blurred image, black and white, watermarked image\"\n }\n executor = {\n \"num_of_exec\": 2,\n \"cfg_scale\": 7,\n \"inference_steps\": 50\n }\n pipeline = setup_pipeline(\"CompVis/stable-diffusion-v1-4\")\n process_task(job, pipeline, executor, \"./generated-images\", parallel_exec=False)\n\n Note:\n This function also handles CUDA cache clearing and garbage collection for memory management.\n \"\"\"\n \n def call_generate_image():\n images = generate_image(job, pipeline, executor, parallel_exec)\n if images is not None:\n for image in images:\n timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S%f\")\n image_path = f\"{path}/generated_image_{timestamp}.png\"\n image.save(image_path)\n print(f\"[sevsd] - image saved at {image_path}\")\n else:\n print(\"[sevsd] - image generation failed due to memory constraints.\")\n check_cuda_and_clear_cache()\n \n try:\n path = check_os_path(path)\n if job is not None:\n if parallel_exec is not True:\n num_images = executor.get(\"num_of_exec\", 1)\n for _ in range(num_images):\n call_generate_image()\n else:\n call_generate_image()\n except Exception as e:\n print(f\"[sevsd] - exception: {e}\")\n finally:\n check_cuda_and_clear_cache()"
},
{
"identifier": "check_os_path",
"path": "sevsd/process_task.py",
"snippet": "def check_os_path(path):\n r\"\"\"\n Checks if the given path exists, and if not, creates the necessary directories.\n This function ensures that the output path for saving images is available.\n\n Parameters:\n path (str): The directory path to check and create if necessary.\n\n Returns:\n str: The verified or created directory path.\n \"\"\"\n if not os.path.exists(path):\n os.makedirs(path)\n print(f\"[sevsd] - created path: {path}\")\n return path"
}
] | import unittest
import sys
from unittest.mock import patch, MagicMock
from sevsd.process_task import check_cuda_and_clear_cache, process_task, check_os_path | 991 | sys.path.append('../')
class TestProcessTask(unittest.TestCase):
@patch('sevsd.process_task.generate_image')
def test_process_task(self, mock_generate_image):
mock_image = MagicMock()
mock_image.save = MagicMock()
mock_generate_image.return_value = [mock_image]
fake_job = {"prompt": "prompt", "details": (None, 50, 1, 7.5)}
fake_pipeline = MagicMock()
fake_executor = {"num_of_exec": 1, "cfg_scale": 7}
fake_path = "test_path"
| sys.path.append('../')
class TestProcessTask(unittest.TestCase):
@patch('sevsd.process_task.generate_image')
def test_process_task(self, mock_generate_image):
mock_image = MagicMock()
mock_image.save = MagicMock()
mock_generate_image.return_value = [mock_image]
fake_job = {"prompt": "prompt", "details": (None, 50, 1, 7.5)}
fake_pipeline = MagicMock()
fake_executor = {"num_of_exec": 1, "cfg_scale": 7}
fake_path = "test_path"
| process_task(fake_job, fake_pipeline, fake_executor, fake_path, parallel_exec=True) | 1 | 2023-12-28 16:19:12+00:00 | 2k |
Emperor-WS/PyEmber | ember/autograd/numeric.py | [
{
"identifier": "Hook",
"path": "ember/autograd/hook.py",
"snippet": "class Hook:\n \"\"\"\n Hook class for attaching gradient functions to tensors.\n\n Hooks allow users to attach custom gradient functions to tensors for\n monitoring or modifying gradients during backpropagation.\n\n Attributes:\n - tensor (Tensor): The target tensor.\n - grad_fn (callable): The gradient function to be applied to the tensor.\n\n Methods:\n - __init__(self, tensor, grad_fn): Constructor for Hook class.\n - __repr__(self): String representation of the Hook instance.\n\n \"\"\"\n\n __slots__ = 'tensor', 'grad_fn'\n\n def __init__(self, tensor, grad_fn):\n \"\"\"\n Constructor for the Hook class.\n\n Args:\n - tensor (Tensor): The target tensor.\n - grad_fn (callable): The gradient function to be applied to the tensor.\n\n \"\"\"\n self.tensor = tensor\n self.grad_fn = grad_fn\n\n def __repr__(self):\n \"\"\"\n String representation of the Hook instance.\n\n Returns:\n - str: A string containing information about the tensor and its associated gradient function.\n\n \"\"\"\n # Extract the class name from the qualified name of the gradient function\n grad_name = self.grad_fn.__qualname__.split('.')[0]\n return f\"Hook(tensor_id={self.tensor.id}, grad_fn={grad_name.upper()})\""
},
{
"identifier": "numpy_unpad",
"path": "ember/autograd/_utils.py",
"snippet": "def numpy_unpad(x, pad_width):\n \"\"\"\n Remove padding from an array.\n\n Args:\n - x (numpy.ndarray): Input array.\n - pad_width (tuple of ints): Amount of padding on each dimension.\n\n Returns:\n - numpy.ndarray: Unpadded array.\n\n \"\"\"\n slices = []\n for pad in pad_width:\n end = None if pad[1] == 0 else -pad[1]\n slices.append(slice(pad[0], end ))\n return x[tuple(slices)]"
},
{
"identifier": "inv_permutation",
"path": "ember/autograd/_utils.py",
"snippet": "def inv_permutation(permutation):\n \"\"\"\n Compute the inverse of a permutation.\n\n Args:\n - permutation (list): List representing a permutation.\n\n Returns:\n - list: Inverse permutation.\n\n \"\"\"\n inverse = [0] * len(permutation)\n for original_idx, permuted_idx in enumerate(permutation):\n inverse[permuted_idx] = original_idx\n return inverse"
}
] | import numpy as np
import ember
from .hook import Hook
from ._utils import numpy_unpad, inv_permutation | 742 |
def _T(t):
"""
Transpose operation on the input tensor.
Args:
- t: Input tensor.
Returns:
- Tensor: Resultant tensor with the transpose operation applied.
"""
t = ember.to_tensor(t) # Convert the input tensor to a Tensor
data = t.data.T # Transpose operation
requires_grad = t.requires_grad # Set requires_grad based on input tensor
hooks = []
# Register a hook for gradient computation if the input tensor requires it
    if requires_grad: | hooks.append(Hook(t, lambda grad: grad.T)) | 0 | 2023-12-23 23:11:58+00:00 | 2k |
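Conceptually, the gold completion registers Hook(t, lambda grad: grad.T) so the backward pass re-transposes incoming gradients. The core of that grad_fn in isolation, with an illustrative gradient:

import numpy as np

grad_out = np.arange(6).reshape(2, 3)    # gradient w.r.t. the transposed output
grad_fn = lambda grad: grad.T            # what the hook applies during backprop
print(grad_fn(grad_out).shape)           # (3, 2), matching the original tensor's shape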
Hassi34/iot-device-identification | src/stage_03_preprocess_data.py | [
{
"identifier": "read_yaml",
"path": "src/utils/common.py",
"snippet": "def read_yaml(path_to_yaml: str) -> dict:\n with open(path_to_yaml) as yaml_file:\n content = yaml.safe_load(yaml_file)\n return content"
},
{
"identifier": "get_logger",
"path": "src/utils/sys_logging.py",
"snippet": "def get_logger(logs_filepath: str):\n logger.add(\n logs_filepath,\n format=\"{time} | {level} | {name}.{module}:{line} | {message}\",\n level=\"DEBUG\",\n rotation=\"10 KB\",\n retention=\"10 days\",\n compression=\"zip\",\n colorize=True,\n enqueue=True,\n catch=True,\n encoding=\"utf-8\",\n )\n return logger"
},
{
"identifier": "write_dict_to_yaml",
"path": "src/utils/common.py",
"snippet": "def write_dict_to_yaml(dict_input: dict, yaml_file_path: str):\n try:\n current_file_data = read_yaml(yaml_file_path)\n current_file_data.update(dict_input)\n with open(yaml_file_path, \"w\") as f:\n yaml.dump(current_file_data, f)\n except (FileNotFoundError , AttributeError):\n with open(yaml_file_path, \"w\") as f:\n yaml.dump(dict_input, f)"
},
{
"identifier": "gzip_np_arr",
"path": "src/utils/data_ops.py",
"snippet": "def gzip_np_arr(np_array: np.ndarray, filepath: str):\n with gzip.GzipFile(filepath, \"w\") as f:\n np.save(file=f, arr=np_array)"
},
{
"identifier": "get_fitted_pipeline",
"path": "src/utils/data_ops.py",
"snippet": "def get_fitted_pipeline(df, columns, KNN_IMPUTER_NEIGHBORS: int = 3):\n ct = ColumnTransformer(\n transformers=[(\"input_features\", \"passthrough\", columns)], remainder=\"drop\"\n )\n imputer = KNNImputer(n_neighbors=KNN_IMPUTER_NEIGHBORS)\n scaler = StandardScaler()\n\n pipeline = Pipeline(\n steps=[(\"select_columns\", ct), (\"imputer\", imputer), (\"scaler\", scaler)]\n )\n\n return pipeline.fit(df)"
}
] | import argparse
import joblib
import pandas as pd
from src.utils.common import read_yaml
from src.utils.sys_logging import get_logger
from sklearn.preprocessing import LabelEncoder
from src.utils.common import write_dict_to_yaml
from src.utils.data_ops import gzip_np_arr
from sklearn.model_selection import train_test_split
from src.utils.data_ops import get_fitted_pipeline
from pathlib import Path | 1,022 |
STAGE = "Preprocess Data"
def preprocess_data():
complete_df = pd.read_parquet(RAW_DATA_FILE_PATH)
logger.info(
f'The raw data file has been loaded from "{RAW_DATA_FILE_PATH}" with the shape "{complete_df.shape}"'
)
duplicate_rows = complete_df.duplicated().sum()
if duplicate_rows > 0:
logger.warning(
f"Found {duplicate_rows} duplicate rows, removing duplicate rows..."
)
complete_df = complete_df.drop_duplicates(keep="first")
X = complete_df.drop([TARGET_COLUMN_NAME], axis=1)
y = complete_df[TARGET_COLUMN_NAME]
feature_cols = params["input_features_schema"]
feature_cols = list(feature_cols.keys())
logger.info(f"Read {len(feature_cols)} feature columns from params")
data_processing_pipeline = get_fitted_pipeline(
X, feature_cols, KNN_IMPUTER_NEIGHBORS=KNN_IMPUTER_NEIGHBORS
)
Path(DATA_PREPROCESSING_PIPELINE_FILE_PATH).parent.absolute().mkdir(parents=True, exist_ok=True)
joblib.dump(data_processing_pipeline, DATA_PREPROCESSING_PIPELINE_FILE_PATH, compress=1)
logger.info(f"Saved the preprocessing pipeline to {DATA_PREPROCESSING_PIPELINE_FILE_PATH}")
data_processing_pipeline = joblib.load(DATA_PREPROCESSING_PIPELINE_FILE_PATH)
data_processing_pipeline
data_processing_pipeline = joblib.load(DATA_PREPROCESSING_PIPELINE_FILE_PATH)
logger.info(
f'Loaded sklearn data preprocessing pipeline from "{DATA_PREPROCESSING_PIPELINE_FILE_PATH}"'
)
X_transformed = data_processing_pipeline.transform(X)
logger.info(f'Dataframe shape after transformation is "{X_transformed.shape}"')
le = LabelEncoder()
le.fit(y)
labels_mapping_dict = {"labels_mapping": ""}
le_dict = dict(zip(le.transform(le.classes_), le.classes_))
le_dict = {int(k): v for k, v in le_dict.items()}
labels_mapping_dict["labels_mapping"] = le_dict
logger.info(f"Label encoding map has the dictionary: {le_dict}")
write_dict_to_yaml(labels_mapping_dict, parsed_args.params)
logger.info(f'Updated the label encoding map in the file at "{parsed_args.params}"')
|
STAGE = "Preprocess Data"
def preprocess_data():
complete_df = pd.read_parquet(RAW_DATA_FILE_PATH)
logger.info(
f'The raw data file has been loaded from "{RAW_DATA_FILE_PATH}" with the shape "{complete_df.shape}"'
)
duplicate_rows = complete_df.duplicated().sum()
if duplicate_rows > 0:
logger.warning(
f"Found {duplicate_rows} duplicate rows, removing duplicate rows..."
)
complete_df = complete_df.drop_duplicates(keep="first")
X = complete_df.drop([TARGET_COLUMN_NAME], axis=1)
y = complete_df[TARGET_COLUMN_NAME]
feature_cols = params["input_features_schema"]
feature_cols = list(feature_cols.keys())
logger.info(f"Read {len(feature_cols)} feature columns from params")
data_processing_pipeline = get_fitted_pipeline(
X, feature_cols, KNN_IMPUTER_NEIGHBORS=KNN_IMPUTER_NEIGHBORS
)
Path(DATA_PREPROCESSING_PIPELINE_FILE_PATH).parent.absolute().mkdir(parents=True, exist_ok=True)
joblib.dump(data_processing_pipeline, DATA_PREPROCESSING_PIPELINE_FILE_PATH, compress=1)
logger.info(f"Saved the preprocessing pipeline to {DATA_PREPROCESSING_PIPELINE_FILE_PATH}")
    data_processing_pipeline = joblib.load(DATA_PREPROCESSING_PIPELINE_FILE_PATH)
logger.info(
f'Loaded sklearn data preprocessing pipeline from "{DATA_PREPROCESSING_PIPELINE_FILE_PATH}"'
)
X_transformed = data_processing_pipeline.transform(X)
logger.info(f'Dataframe shape after transformation is "{X_transformed.shape}"')
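    # Encode the string class labels as integer ids and keep the mapping so predictions can be decoded later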
le = LabelEncoder()
le.fit(y)
labels_mapping_dict = {"labels_mapping": ""}
le_dict = dict(zip(le.transform(le.classes_), le.classes_))
le_dict = {int(k): v for k, v in le_dict.items()}
labels_mapping_dict["labels_mapping"] = le_dict
logger.info(f"Label encoding map has the dictionary: {le_dict}")
write_dict_to_yaml(labels_mapping_dict, parsed_args.params)
logger.info(f'Updated the label encoding map in the file at "{parsed_args.params}"') | labels_dict = read_yaml(parsed_args.params)["labels_mapping"] | 0 | 2023-12-25 10:40:19+00:00 | 2k |
see2023/Bert-VITS2-ext | for_deploy/infer_utils.py | [
{
"identifier": "config",
"path": "config.py",
"snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_lang: int = 5,\n max_val_total: int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n num_workers: int,\n spec_cache: bool,\n keep_ckpts: int,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n v_model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):"
},
{
"identifier": "text2sep_kata",
"path": "text/japanese.py",
"snippet": "def text2sep_kata(text: str) -> (list, list):\n parsed = pyopenjtalk.run_frontend(text)\n\n res = []\n sep = []\n for parts in parsed:\n word, yomi = replace_punctuation(parts[\"string\"]), parts[\"pron\"].replace(\n \"’\", \"\"\n )\n if yomi:\n if re.match(_MARKS, yomi):\n if len(word) > 1:\n word = [replace_punctuation(i) for i in list(word)]\n yomi = word\n res += yomi\n sep += word\n continue\n elif word not in rep_map.keys() and word not in rep_map.values():\n word = \",\"\n yomi = word\n res.append(yomi)\n else:\n if word in _SYMBOL_TOKENS:\n res.append(word)\n elif word in (\"っ\", \"ッ\"):\n res.append(\"ッ\")\n elif word in _NO_YOMI_TOKENS:\n pass\n else:\n res.append(word)\n sep.append(word)\n return sep, [hira2kata(i) for i in res], get_accent(parsed)"
}
] | import sys
import torch
from transformers import (
AutoModelForMaskedLM,
AutoTokenizer,
DebertaV2Model,
DebertaV2Tokenizer,
ClapModel,
ClapProcessor,
)
from config import config
from text.japanese import text2sep_kata | 1,223 |
class BertFeature:
def __init__(self, model_path, language="ZH"):
self.model_path = model_path
self.language = language
self.tokenizer = None
self.model = None
self.device = None
self._prepare()
def _get_device(self, device=config.bert_gen_config.device):
if (
sys.platform == "darwin"
and torch.backends.mps.is_available()
and device == "cpu"
):
device = "mps"
if not device:
device = "cuda"
return device
def _prepare(self):
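        # Resolve the target device, then load the tokenizer/model pair that matches the configured language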
self.device = self._get_device()
if self.language == "EN":
self.tokenizer = DebertaV2Tokenizer.from_pretrained(self.model_path)
self.model = DebertaV2Model.from_pretrained(self.model_path).to(self.device)
else:
self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)
self.model = AutoModelForMaskedLM.from_pretrained(self.model_path).to(
self.device
)
self.model.eval()
def get_bert_feature(self, text, word2ph):
if self.language == "JP":
|
class BertFeature:
def __init__(self, model_path, language="ZH"):
self.model_path = model_path
self.language = language
self.tokenizer = None
self.model = None
self.device = None
self._prepare()
def _get_device(self, device=config.bert_gen_config.device):
if (
sys.platform == "darwin"
and torch.backends.mps.is_available()
and device == "cpu"
):
device = "mps"
if not device:
device = "cuda"
return device
def _prepare(self):
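        # Resolve the target device, then load the tokenizer/model pair that matches the configured language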
self.device = self._get_device()
if self.language == "EN":
self.tokenizer = DebertaV2Tokenizer.from_pretrained(self.model_path)
self.model = DebertaV2Model.from_pretrained(self.model_path).to(self.device)
else:
self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)
self.model = AutoModelForMaskedLM.from_pretrained(self.model_path).to(
self.device
)
self.model.eval()
def get_bert_feature(self, text, word2ph):
if self.language == "JP": | text = "".join(text2sep_kata(text)[0]) | 1 | 2023-12-27 03:09:11+00:00 | 2k |
chinhsuanwu/ifusion-threestudio | threestudio/models/materials/no_material.py | [
{
"identifier": "BaseMaterial",
"path": "threestudio/models/materials/base.py",
"snippet": "class BaseMaterial(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n requires_normal: bool = False\n requires_tangent: bool = False\n\n def configure(self):\n pass\n\n def forward(self, *args, **kwargs) -> Float[Tensor, \"*B 3\"]:\n raise NotImplementedError\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}"
},
{
"identifier": "get_encoding",
"path": "threestudio/models/networks.py",
"snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n elif config.otype == \"HashGridSpatialTime\":\n encoding = TCNNEncodingSpatialTime(n_input_dims, config) # 4D-fy encoding\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding"
},
{
"identifier": "get_mlp",
"path": "threestudio/models/networks.py",
"snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network"
},
{
"identifier": "dot",
"path": "threestudio/utils/ops.py",
"snippet": "def dot(x, y):\n return torch.sum(x * y, -1, keepdim=True)"
},
{
"identifier": "get_activation",
"path": "threestudio/utils/ops.py",
"snippet": "def get_activation(name) -> Callable:\n if name is None:\n return lambda x: x\n name = name.lower()\n if name == \"none\":\n return lambda x: x\n elif name == \"lin2srgb\":\n return lambda x: torch.where(\n x > 0.0031308,\n torch.pow(torch.clamp(x, min=0.0031308), 1.0 / 2.4) * 1.055 - 0.055,\n 12.92 * x,\n ).clamp(0.0, 1.0)\n elif name == \"exp\":\n return lambda x: torch.exp(x)\n elif name == \"shifted_exp\":\n return lambda x: torch.exp(x - 1.0)\n elif name == \"trunc_exp\":\n return trunc_exp\n elif name == \"shifted_trunc_exp\":\n return lambda x: trunc_exp(x - 1.0)\n elif name == \"sigmoid\":\n return lambda x: torch.sigmoid(x)\n elif name == \"tanh\":\n return lambda x: torch.tanh(x)\n elif name == \"shifted_softplus\":\n return lambda x: F.softplus(x - 1.0)\n elif name == \"scale_-11_01\":\n return lambda x: x * 0.5 + 0.5\n else:\n try:\n return getattr(F, name)\n except AttributeError:\n raise ValueError(f\"Unknown activation function: {name}\")"
}
] | import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import dot, get_activation
from threestudio.utils.typing import * | 1,291 |
@threestudio.register("no-material")
class NoMaterial(BaseMaterial):
@dataclass
class Config(BaseMaterial.Config):
n_output_dims: int = 3
color_activation: str = "sigmoid"
input_feature_dims: Optional[int] = None
mlp_network_config: Optional[dict] = None
requires_normal: bool = False
cfg: Config
def configure(self) -> None:
self.use_network = False
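        # A projection MLP is only built when both an input feature size and an MLP config are provided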
if (
self.cfg.input_feature_dims is not None
and self.cfg.mlp_network_config is not None
):
self.network = get_mlp(
self.cfg.input_feature_dims,
self.cfg.n_output_dims,
self.cfg.mlp_network_config,
)
self.use_network = True
self.requires_normal = self.cfg.requires_normal
def forward(
self, features: Float[Tensor, "B ... Nf"], **kwargs
) -> Float[Tensor, "B ... Nc"]:
if not self.use_network:
assert (
features.shape[-1] == self.cfg.n_output_dims
), f"Expected {self.cfg.n_output_dims} output dims, only got {features.shape[-1]} dims input."
|
@threestudio.register("no-material")
class NoMaterial(BaseMaterial):
@dataclass
class Config(BaseMaterial.Config):
n_output_dims: int = 3
color_activation: str = "sigmoid"
input_feature_dims: Optional[int] = None
mlp_network_config: Optional[dict] = None
requires_normal: bool = False
cfg: Config
def configure(self) -> None:
self.use_network = False
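        # A projection MLP is only built when both an input feature size and an MLP config are provided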
if (
self.cfg.input_feature_dims is not None
and self.cfg.mlp_network_config is not None
):
self.network = get_mlp(
self.cfg.input_feature_dims,
self.cfg.n_output_dims,
self.cfg.mlp_network_config,
)
self.use_network = True
self.requires_normal = self.cfg.requires_normal
def forward(
self, features: Float[Tensor, "B ... Nf"], **kwargs
) -> Float[Tensor, "B ... Nc"]:
if not self.use_network:
assert (
features.shape[-1] == self.cfg.n_output_dims
), f"Expected {self.cfg.n_output_dims} output dims, only got {features.shape[-1]} dims input." | color = get_activation(self.cfg.color_activation)(features) | 4 | 2023-12-27 20:30:33+00:00 | 2k |
jasursadikov/mud | commands.py | [
{
"identifier": "TEXT",
"path": "utils.py",
"snippet": "TEXT = {\n 'white': '\\033[37m',\n 'gray': '\\033[90m',\n 'black': '\\033[30m',\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'bright_white': '\\033[97m',\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n}"
},
{
"identifier": "BACK",
"path": "utils.py",
"snippet": "BACK = {\n 'white': '\\033[47m',\n 'medium_gray': '\\033[100m',\n 'black': '\\033[40m',\n 'red': '\\033[41m',\n 'green': '\\033[42m',\n 'yellow': '\\033[43m',\n 'blue': '\\033[44m',\n 'magenta': '\\033[45m',\n 'cyan': '\\033[46m',\n 'bright_white': '\\033[107m',\n 'bright_red': '\\033[101m',\n 'bright_green': '\\033[102m',\n 'bright_yellow': '\\033[103m',\n 'bright_blue': '\\033[104m',\n 'bright_magenta': '\\033[105m',\n 'bright_cyan': '\\033[106m',\n}"
},
{
"identifier": "RESET",
"path": "utils.py",
"snippet": "RESET = '\\033[0m'"
},
{
"identifier": "STYLES",
"path": "utils.py",
"snippet": "STYLES = {\n 'bold': '\\033[1m',\n 'dim': '\\033[2m',\n 'italic': '\\033[3m',\n 'underline': '\\033[4m',\n 'blink': '\\033[5m',\n}"
},
{
"identifier": "END_STYLES",
"path": "utils.py",
"snippet": "END_STYLES = {\n 'bold': '\\033[22m',\n 'dim': '\\033[22m',\n 'italic': '\\033[23m',\n 'underline': '\\033[24m',\n 'blink': '\\033[25m',\n}"
},
{
"identifier": "glyph",
"path": "utils.py",
"snippet": "def glyph(key: str) -> str:\n return GLYPHS[key][0] if settings.mud_settings['nerd_fonts'] else GLYPHS[key][1]"
}
] | import utils
import asyncio
import subprocess
from utils import TEXT, BACK, RESET, STYLES, END_STYLES, glyph
from typing import List, Dict
from collections import Counter
from prettytable import PrettyTable, PLAIN_COLUMNS | 880 |
class Commands:
def __init__(self, repos):
self.repos = repos
self.label_color_cache = {}
self.current_color_index = 0
# `mud status` command implementation
def status(self, repos: Dict[str, List[str]]) -> None:
table = self._get_table()
for path, tags in repos.items():
formatted_path = self._get_formatted_path(path)
branch = self._get_branch_status(path)
author = self._get_authors_name(path)
commit = self._get_commit_message(path, 30)
colored_labels = self._get_formatted_labels(tags)
# Sync with origin status
ahead_behind_cmd = subprocess.run(['git', 'rev-list', '--left-right', '--count', 'HEAD...@{upstream}'],
text=True, cwd=path, capture_output=True)
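            # rev-list --left-right --count prints "<ahead> <behind>" commit counts relative to the upstream branch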
stdout = ahead_behind_cmd.stdout.strip().split()
if len(stdout) >= 2:
ahead, behind = stdout[0], stdout[1]
origin_sync = ''
if ahead and ahead != '0':
|
class Commands:
def __init__(self, repos):
self.repos = repos
self.label_color_cache = {}
self.current_color_index = 0
# `mud status` command implementation
def status(self, repos: Dict[str, List[str]]) -> None:
table = self._get_table()
for path, tags in repos.items():
formatted_path = self._get_formatted_path(path)
branch = self._get_branch_status(path)
author = self._get_authors_name(path)
commit = self._get_commit_message(path, 30)
colored_labels = self._get_formatted_labels(tags)
# Sync with origin status
ahead_behind_cmd = subprocess.run(['git', 'rev-list', '--left-right', '--count', 'HEAD...@{upstream}'],
text=True, cwd=path, capture_output=True)
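            # rev-list --left-right --count prints "<ahead> <behind>" commit counts relative to the upstream branch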
stdout = ahead_behind_cmd.stdout.strip().split()
if len(stdout) >= 2:
ahead, behind = stdout[0], stdout[1]
origin_sync = ''
if ahead and ahead != '0': | origin_sync += f'{TEXT["bright_green"]}{glyph("ahead")} {ahead}{RESET}' | 5 | 2023-12-28 13:09:31+00:00 | 2k |
Q-MM/PureMM | model/PureMM_arch.py | [
{
"identifier": "build_vision_tower",
"path": "model/multimodal_encoder/builder.py",
"snippet": "def build_vision_tower(vision_tower_cfg, **kwargs):\n vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))\n is_absolute_path_exists = os.path.exists(vision_tower)\n\n if is_absolute_path_exists or vision_tower.startswith(\"openai\") or vision_tower.startswith(\"laion\"):\n return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)\n\n raise ValueError(f'Unknown vision tower: {vision_tower}')"
},
{
"identifier": "build_vision_projector",
"path": "model/multimodal_projector/builder.py",
"snippet": "def build_vision_projector(config, delay_load=False, **kwargs):\n projector_type = getattr(config, 'mm_projector_type', 'linear')\n\n if projector_type == 'linear':\n return nn.Linear(config.mm_hidden_size, config.hidden_size)\n\n mlp_gelu_match = re.match(r'^mlp(\\d+)x_gelu$', projector_type)\n if mlp_gelu_match:\n mlp_depth = int(mlp_gelu_match.group(1))\n modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]\n for _ in range(1, mlp_depth):\n modules.append(nn.GELU())\n modules.append(nn.Linear(config.hidden_size, config.hidden_size))\n return nn.Sequential(*modules)\n\n larger_mlp_gelu_match = re.match(r'^larger_mlp(\\d+)x_gelu$', projector_type)\n if larger_mlp_gelu_match:\n mlp_depth = int(mlp_gelu_match.group(1))\n modules = [nn.Linear(config.mm_hidden_size, config.mm_hidden_size)]\n for _ in range(1, mlp_depth-1):\n modules.append(nn.GELU())\n modules.append(nn.Linear(config.mm_hidden_size, config.mm_hidden_size))\n modules.append(nn.Linear(config.mm_hidden_size, config.hidden_size))\n return nn.Sequential(*modules)\n\n if projector_type == 'identity':\n return IdentityMap()\n\n raise ValueError(f'Unknown projector type: {projector_type}')"
}
] | from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
import torch
import torch.nn as nn | 837 | # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
IGNORE_INDEX = -100
IMAGE_TOKEN_INDEX = -200
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"
def rank0_print(rank, *args):
if rank == 0:
print(*args)
class PureMMMetaModel:
def __init__(self, config):
super(PureMMMetaModel, self).__init__(config)
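        # Attach the vision tower only when the config carries multimodal settings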
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=True)
# self.mm_projector = nn.Linear(config.mm_hidden_size, config.hidden_size)
| # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
IGNORE_INDEX = -100
IMAGE_TOKEN_INDEX = -200
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"
def rank0_print(rank, *args):
if rank == 0:
print(*args)
class PureMMMetaModel:
def __init__(self, config):
super(PureMMMetaModel, self).__init__(config)
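        # Attach the vision tower only when the config carries multimodal settings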
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=True)
# self.mm_projector = nn.Linear(config.mm_hidden_size, config.hidden_size) | self.mm_projector = build_vision_projector(config) | 1 | 2023-12-27 09:54:09+00:00 | 2k |
Ananya2001-an/spotify-py-sdk | tests/endpoints/test_recommendations.py | [
{
"identifier": "SpotifyApi",
"path": "spotify_py_sdk/spotify_api.py",
"snippet": "class SpotifyApi:\n \"\"\"Create an api instance and call the various endpoint methods.\n\n :param client_id: Client_ID for your app\n :type client_id: str\n :param client_secret: Client_Secret for your app\n :type client_secret: str\n :param config: pass :class:`SdkConfig` instance, defaults to None\n :type config: :class:`SdkConfig`, optional\n \"\"\"\n _root_url: str = \"https://api.spotify.com/v1/\"\n\n def __init__(self, client_id: str, client_secret: str, config: Optional[SdkConfig] = None):\n \"\"\"Constructor method\n \"\"\"\n self.access_token_manager: AccessTokenManager = AccessTokenManager(client_id, client_secret)\n self.sdk_config: Optional[SdkConfig] = config\n self.albums: Albums = Albums(self)\n self.artists: Artists = Artists(self)\n self.audiobooks: Audiobooks = Audiobooks(self)\n self.browse: Browse = Browse(self)\n self.chapters: Chapters = Chapters(self)\n self.episodes: Episodes = Episodes(self)\n self.recommendations: Recommendations = Recommendations(self)\n self.markets: Markets = Markets(self)\n # self.player: Player = Player(self) # need different auth strategy; yet to be implemented\n self.playlists: Playlists = Playlists(self)\n self.shows: Shows = Shows(self)\n self.tracks: Tracks = Tracks(self)\n self.users: Users = Users(self)\n self.search: Search = Search(self)\n # self.current_user: CurrentUser = CurrentUser(self) # need different auth strategy; yet to be implemented\n\n @classmethod\n def fetch_results(cls, url: str, opts: dict):\n \"\"\"Fetch results by making a request to the given URL\n \"\"\"\n try:\n result = requests.request(method=opts[\"method\"], url=url, headers=opts[\"headers\"], data=opts[\"body\"])\n return result.json()\n except HTTPError as e:\n raise f\"Failed to fetch result! {e}\"\n\n def make_request(self, method: Literal[\"GET\", \"POST\", \"PUT\", \"DELETE\"], url: str, body: Optional[any] = None,\n content_type: Optional[str] = None):\n \"\"\"Get access token and make necessary request call to the api endpoint\n \"\"\"\n try:\n access_token = self.access_token_manager.get_access_token()\n except HTTPError as e:\n raise \"Access Token not available! Authenticate again.\"\n\n full_url = SpotifyApi._root_url + url\n opts = {\n \"method\": method,\n \"headers\": {\n \"Authorization\": f\"Bearer {access_token}\",\n \"Content-Type\": content_type if content_type else \"application/json\"\n },\n \"body\": json.dumps(body) if body and type(body) is not str else body\n }\n\n try:\n if self.sdk_config:\n if self.sdk_config.before_request:\n self.sdk_config.before_request(full_url, opts)\n if self.sdk_config.fetch:\n result = self.sdk_config.fetch(full_url, opts)\n else:\n result = SpotifyApi.fetch_results(full_url, opts)\n if self.sdk_config.after_request:\n self.sdk_config.after_request(full_url, opts, result)\n return result\n\n return SpotifyApi.fetch_results(full_url, opts)\n except (HTTPError, ValueError, InterruptedError) as e:\n raise e\n # handled = self.sdk_config.error_handler.handleErrors(e)\n # if not handled:\n # raise Exception(\"Failed to make request! Try again.\")"
},
{
"identifier": "RecommendationsRequestRequiredArguments",
"path": "spotify_py_sdk/endpoints/recommendations.py",
"snippet": "class RecommendationsRequestRequiredArguments:\n def __init__(self, seed_artists: Optional[list[str]] = None, seed_genres: Optional[list[str]] = None, seed_tracks: Optional[list[str]] = None):\n self.seed_artists = seed_artists\n self.seed_genres = seed_genres\n self.seed_tracks = seed_tracks"
}
] | import json
import pytest
import os
from spotify_py_sdk import SpotifyApi
from spotify_py_sdk.endpoints.recommendations import RecommendationsRequestRequiredArguments
from dotenv import load_dotenv | 1,007 |
load_dotenv()
@pytest.fixture
def api():
|
load_dotenv()
@pytest.fixture
def api(): | return SpotifyApi(os.getenv("CLIENT_ID"), os.getenv("CLIENT_SECRET")) | 0 | 2023-12-27 20:12:31+00:00 | 2k |
kyleliang919/Optimizer-Zoo | optimizer_zoo/Trainer/utils.py | [
{
"identifier": "AsyncTrainer",
"path": "optimizer_zoo/Trainer/async_trainer.py",
"snippet": "class AsyncTrainer(Trainer):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.accelerator.sync_gradients = None\n\n def training_step(self, model, inputs):\n # make sure the gradient is not automatically synced\n with model.no_sync():\n model.train()\n inputs = self._prepare_inputs(inputs)\n\n if is_sagemaker_mp_enabled():\n loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)\n return loss_mb.reduce_mean().detach().to(self.args.device)\n\n with self.compute_loss_context_manager():\n loss = self.compute_loss(model, inputs)\n\n if self.args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n if self.use_apex:\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n self.accelerator.backward(loss)\n return loss.detach() / self.args.gradient_accumulation_steps"
},
{
"identifier": "AsyncSFTTrainer",
"path": "optimizer_zoo/Trainer/async_trainer.py",
"snippet": "class AsyncSFTTrainer(SFTTrainer):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def training_step(self, model, inputs):\n # make sure the gradient is not automatically synced\n with model.no_sync():\n model.train()\n inputs = self._prepare_inputs(inputs)\n\n if is_sagemaker_mp_enabled():\n loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)\n return loss_mb.reduce_mean().detach().to(self.args.device)\n\n with self.compute_loss_context_manager():\n loss = self.compute_loss(model, inputs)\n\n if self.args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n if self.use_apex:\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n self.accelerator.backward(loss)\n return loss.detach() / self.args.gradient_accumulation_steps"
},
{
"identifier": "AsyncDPOTrainer",
"path": "optimizer_zoo/Trainer/async_trainer.py",
"snippet": "class AsyncDPOTrainer(DPOTrainer):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def training_step(self, model, inputs):\n # make sure the gradient is not automatically synced\n with model.no_sync():\n model.train()\n inputs = self._prepare_inputs(inputs)\n\n if is_sagemaker_mp_enabled():\n loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)\n return loss_mb.reduce_mean().detach().to(self.args.device)\n\n with self.compute_loss_context_manager():\n loss = self.compute_loss(model, inputs)\n\n if self.args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n if self.use_apex:\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n self.accelerator.backward(loss)\n return loss.detach() / self.args.gradient_accumulation_steps"
},
{
"identifier": "AsyncSeq2SeqTrainer",
"path": "optimizer_zoo/Trainer/async_trainer.py",
"snippet": "class AsyncSeq2SeqTrainer(Seq2SeqTrainer):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.accelerator.sync_gradients = None\n\n def training_step(self, model, inputs):\n # make sure the gradient is not automatically synced\n with model.no_sync():\n model.train()\n inputs = self._prepare_inputs(inputs)\n\n if is_sagemaker_mp_enabled():\n loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)\n return loss_mb.reduce_mean().detach().to(self.args.device)\n\n with self.compute_loss_context_manager():\n loss = self.compute_loss(model, inputs)\n\n if self.args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n if self.use_apex:\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n self.accelerator.backward(loss)\n return loss.detach() / self.args.gradient_accumulation_steps "
}
] | from transformers import Trainer, Seq2SeqTrainer
from trl import SFTTrainer, DPOTrainer
from .async_trainer import AsyncTrainer, AsyncSFTTrainer, AsyncDPOTrainer, AsyncSeq2SeqTrainer | 1,215 | def create_trainer(training_args):
if training_args.task == "pretraining":
return AsyncTrainer if training_args.async_grad else Trainer
elif training_args.task == "sft":
return AsyncSFTTrainer if training_args.async_grad else SFTTrainer
elif training_args.task == "dpo":
return AsyncDPOTrainer if training_args.async_grad else DPOTrainer
elif training_args.task == "seq2seq":
| def create_trainer(training_args):
if training_args.task == "pretraining":
return AsyncTrainer if training_args.async_grad else Trainer
elif training_args.task == "sft":
return AsyncSFTTrainer if training_args.async_grad else SFTTrainer
elif training_args.task == "dpo":
return AsyncDPOTrainer if training_args.async_grad else DPOTrainer
elif training_args.task == "seq2seq": | return AsyncSeq2SeqTrainer if training_args.async_grad else Seq2SeqTrainer | 3 | 2023-12-22 17:07:00+00:00 | 2k |
giaminhgist/3D-DAM | lib/model/DuoAttention.py | [
{
"identifier": "SpatialAttention3D",
"path": "lib/model/attention_block.py",
"snippet": "class SpatialAttention3D(nn.Module):\n def __init__(self, out_channel=64, kernel_size=3, stride=1, padding=1):\n super(SpatialAttention3D, self).__init__()\n\n self.conv = nn.Conv3d(2, out_channel,\n kernel_size=kernel_size, stride=stride, padding=padding, bias=False)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n residual = x\n avg_out = torch.mean(x, dim=1, keepdim=True)\n max_out, _ = torch.max(x, dim=1, keepdim=True)\n x = torch.cat([avg_out, max_out], dim=1)\n x = self.conv(x)\n x = self.sigmoid(x)\n out = x * residual\n return out"
},
{
"identifier": "ChannelAttention3D",
"path": "lib/model/attention_block.py",
"snippet": "class ChannelAttention3D(nn.Module):\n def __init__(self, in_planes=64, ratio=8):\n super(ChannelAttention3D, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool3d(1)\n self.max_pool = nn.AdaptiveMaxPool3d(1)\n\n self.fc = nn.Sequential(nn.Conv3d(in_planes, in_planes // ratio, 1, bias=False),\n nn.ReLU(),\n nn.Conv3d(in_planes // ratio, in_planes, 1, bias=False))\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n residual = x\n avg_out = self.fc(self.avg_pool(x))\n max_out = self.fc(self.max_pool(x))\n out = avg_out + max_out\n return self.sigmoid(out) * residual"
},
{
"identifier": "residual_block",
"path": "lib/model/attention_block.py",
"snippet": "class residual_block(nn.Module):\n def __init__(self, channel_size=64):\n super(residual_block, self).__init__()\n\n self.conv = nn.Conv3d(channel_size, channel_size, kernel_size=3, padding=1)\n self.relu = nn.ReLU()\n self.bn = nn.BatchNorm3d(channel_size)\n\n def forward(self, x):\n residual = x\n y = self.conv(x)\n y = self.bn(y)\n y = self.relu(y)\n out = y + residual\n return out"
}
] | import numpy as np
import torch
from torch import nn
from lib.model.attention_block import SpatialAttention3D, ChannelAttention3D, residual_block | 804 |
class DAM(nn.Module):
def __init__(self, channels=64):
super(DAM, self).__init__()
self.sa = SpatialAttention3D(out_channel=channels)
self.ca = ChannelAttention3D(in_planes=channels)
def forward(self, x):
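        # Channel attention first, then spatial attention, with the input added back as a residual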
residual = x
out = self.ca(x)
out = self.sa(out)
out = out + residual
return out
class Duo_Attention(nn.Module):
def __init__(
self, input_size=(1, 169, 208, 179), num_classes=3, dropout=0
):
super().__init__()
self.conv = nn.Sequential(
nn.Conv3d(input_size[0], 8, 3, padding=1),
nn.BatchNorm3d(8),
nn.ReLU(),
# nn.MaxPool3d(2, 2),
nn.Conv3d(8, 16, 3, padding=1, stride=2),
nn.BatchNorm3d(16),
nn.ReLU(),
|
class DAM(nn.Module):
def __init__(self, channels=64):
super(DAM, self).__init__()
self.sa = SpatialAttention3D(out_channel=channels)
self.ca = ChannelAttention3D(in_planes=channels)
def forward(self, x):
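        # Channel attention first, then spatial attention, with the input added back as a residual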
residual = x
out = self.ca(x)
out = self.sa(out)
out = out + residual
return out
class Duo_Attention(nn.Module):
def __init__(
self, input_size=(1, 169, 208, 179), num_classes=3, dropout=0
):
super().__init__()
self.conv = nn.Sequential(
nn.Conv3d(input_size[0], 8, 3, padding=1),
nn.BatchNorm3d(8),
nn.ReLU(),
# nn.MaxPool3d(2, 2),
nn.Conv3d(8, 16, 3, padding=1, stride=2),
nn.BatchNorm3d(16),
nn.ReLU(), | residual_block(channel_size=16), | 2 | 2023-12-22 10:15:55+00:00 | 2k |
itsluminous/EasyEncryption | script.py | [
{
"identifier": "generate_key",
"path": "core.py",
"snippet": "def generate_key():\n \"\"\"Generate a Fernet key.\"\"\"\n return Fernet.generate_key()"
},
{
"identifier": "encrypt_message",
"path": "core.py",
"snippet": "def encrypt_message(message, key):\n \"\"\"Encrypt a message using the provided key.\"\"\"\n fernet = Fernet(key)\n encrypted = fernet.encrypt(message.encode())\n return encrypted"
},
{
"identifier": "decrypt_message",
"path": "core.py",
"snippet": "def decrypt_message(encrypted_message, key):\n \"\"\"Decrypt an encrypted message using the provided key.\"\"\"\n fernet = Fernet(key)\n decrypted = fernet.decrypt(encrypted_message).decode()\n return decrypted"
},
{
"identifier": "encrypt_file",
"path": "core.py",
"snippet": "def encrypt_file(file_path, key):\n \"\"\"Encrypt a file using the provided key.\"\"\"\n try:\n with open(file_path, 'r', encoding='utf-8') as file:\n content = file.read()\n encrypted_content = encrypt_message(content, key)\n with open(file_path + '.enc', 'wb') as encrypted_file:\n encrypted_file.write(encrypted_content)\n print(f\"\\nFile '{file_path}' encrypted successfully.\")\n except FileNotFoundError:\n print(\"\\nFile not found.\")"
},
{
"identifier": "decrypt_file",
"path": "core.py",
"snippet": "def decrypt_file(file_path, key):\n \"\"\"Decrypt an encrypted file using the provided key.\"\"\"\n try:\n with open(file_path, 'rb', encoding='utf-8') as file:\n encrypted_content = file.read()\n decrypted_content = decrypt_message(encrypted_content, key)\n decrypted_file_path = file_path[:-4] # Remove the '.enc' extension\n with open(decrypted_file_path, 'w', encoding='utf-8') as decrypted_file:\n decrypted_file.write(decrypted_content)\n print(f\"\\nFile '{file_path}' decrypted successfully.\")\n except FileNotFoundError:\n print(\"\\nFile not found.\")\n except ValueError:\n print(\"\\nInvalid decryption key or file content.\")"
}
] | from core import generate_key, encrypt_message, decrypt_message, encrypt_file, decrypt_file | 783 | """
Script providing a user interface for encryption and decryption operations.
"""
def generate_new_key():
"""
Generate a new encryption key.
Returns:
- bytes: New encryption key.
"""
key = generate_key()
print(f"\nGenerated Key: {key.decode()}")
return key
def enter_user_key():
"""
Prompt user to enter a key.
Returns:
- bytes: User-entered key.
"""
print("\nEnter the key:")
return input().encode()
def encrypt_user_message(key):
"""
Encrypt a user-entered message.
Parameters:
- key (bytes): Encryption key.
"""
if key is None:
print("\nPlease generate or enter a key first.")
else:
print("\nEnter a message to encrypt (press Enter twice to finish):")
lines = []
while True:
line = input()
if not line:
break
lines.append(line)
user_input = '\n'.join(lines)
encrypted_message = encrypt_message(user_input, key)
print(f"\nEncrypted message: {encrypted_message}")
def decrypt_user_message(key):
"""
Decrypt a user-entered message.
Parameters:
- key (bytes): Decryption key.
"""
if key is None:
print("\nPlease generate or enter a key first.")
else:
print("\nEnter the encrypted message (press Enter twice to finish):")
lines = []
while True:
line = input()
if not line:
break
lines.append(line)
encrypted_input = '\n'.join(lines)
| """
Script providing a user interface for encryption and decryption operations.
"""
def generate_new_key():
"""
Generate a new encryption key.
Returns:
- bytes: New encryption key.
"""
key = generate_key()
print(f"\nGenerated Key: {key.decode()}")
return key
def enter_user_key():
"""
Prompt user to enter a key.
Returns:
- bytes: User-entered key.
"""
print("\nEnter the key:")
return input().encode()
def encrypt_user_message(key):
"""
Encrypt a user-entered message.
Parameters:
- key (bytes): Encryption key.
"""
if key is None:
print("\nPlease generate or enter a key first.")
else:
print("\nEnter a message to encrypt (press Enter twice to finish):")
lines = []
while True:
line = input()
if not line:
break
lines.append(line)
user_input = '\n'.join(lines)
encrypted_message = encrypt_message(user_input, key)
print(f"\nEncrypted message: {encrypted_message}")
def decrypt_user_message(key):
"""
Decrypt a user-entered message.
Parameters:
- key (bytes): Decryption key.
"""
if key is None:
print("\nPlease generate or enter a key first.")
else:
print("\nEnter the encrypted message (press Enter twice to finish):")
lines = []
while True:
line = input()
if not line:
break
lines.append(line)
encrypted_input = '\n'.join(lines) | decrypted_message = decrypt_message(encrypted_input.encode(), key) | 2 | 2023-12-31 13:24:53+00:00 | 2k |
gardenifi/server | tests/api/resource_not_found_test.py | [
{
"identifier": "app",
"path": "app/main_app.py",
"snippet": "INVALID_DATA = \"Invalid data: Unable to process the provided data\"\nclass GlobalVars:\nclass WifiData(BaseModel):\nclass ValveData(BaseModel):\nclass BleData(BaseModel):\n def __init__(self):\n def refresh_set(self):\n def refresh_set(self, value):\nasync def index():\nasync def resource_not_found(request: Request, exc: HTTPException):\nasync def read_ble_data(page: int = None):\nasync def write_ble_data(data: BleData):\nasync def discover_wifi(chunked: int = None, page: int = None):\nasync def save_wifi(data: WifiData):\nasync def turn(data: ValveData):\nasync def check_mqtt():\ndef web_server():\ndef setup_gpio():\ndef parse_arguments():\ndef main():"
},
{
"identifier": "resource_not_found",
"path": "app/main_app.py",
"snippet": "@app.exception_handler(404)\nasync def resource_not_found(request: Request, exc: HTTPException):\n \"\"\"Not found error.\"\"\"\n logger.error(f\"Request: {request}\")\n return JSONResponse(status_code=404, content={\"detail\": str(exc.detail)})"
}
] | import json
import pytest
from fastapi.testclient import TestClient
from fastapi import HTTPException, Request
from fastapi.responses import JSONResponse
from app.main_app import app
from app.main_app import resource_not_found | 712 | """MIT License
Copyright (c) 2023, Marios Karagiannopoulos
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
**Attribution Requirement:**
When using or distributing the software, an attribution to Marios Karagiannopoulos must be included.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
client = TestClient(app)
scope = {"type": "http", "http_version": "1.1", "method": "GET", "path": "/"}
@pytest.fixture(scope="function")
async def request_obj():
"""Request object creation fixture"""
return Request(scope)
class TestResourceNotFound:
"""
Test class for the 'resource_not_found' error handler function.
"""
@pytest.mark.asyncio
async def test_returns_json_response_with_status_code_404_and_detail_of_httpexception(self, obj=request_obj):
"""
Test for returning a JSONResponse object with status code 404 and the detail of the HTTPException passed as an argument.
"""
exc = HTTPException(status_code=404, detail="Not found")
| """MIT License
Copyright (c) 2023, Marios Karagiannopoulos
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
**Attribution Requirement:**
When using or distributing the software, an attribution to Marios Karagiannopoulos must be included.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
client = TestClient(app)
scope = {"type": "http", "http_version": "1.1", "method": "GET", "path": "/"}
@pytest.fixture(scope="function")
async def request_obj():
"""Request object creation fixture"""
return Request(scope)
class TestResourceNotFound:
"""
Test class for the 'resource_not_found' error handler function.
"""
@pytest.mark.asyncio
async def test_returns_json_response_with_status_code_404_and_detail_of_httpexception(self, obj=request_obj):
"""
Test for returning a JSONResponse object with status code 404 and the detail of the HTTPException passed as an argument.
"""
exc = HTTPException(status_code=404, detail="Not found") | response = await resource_not_found(obj, exc) | 1 | 2023-12-22 08:06:09+00:00 | 2k |
xiaoye0x0/pfgo_tg_bot | utils/task/set_args.py | [
{
"identifier": "Task",
"path": "utils/task/model.py",
"snippet": "class Task(metaclass=SingletonMeta):\n def __init__(self, args) -> None:\n self.conf_file = args.config\n\n self.bot_token: str = \"\"\n\n self.pfgo_url: str = \"\"\n self.username: str = \"\"\n self.password: str = \"\"\n self.hide: list = []\n\n self.webhook_url = \"\"\n self.webhook_port = \"\"\n self.running_host = \"\"\n self.running_port = 0\n\n self._init_conf()\n\n def _init_conf(self):\n config = configparser.ConfigParser()\n config.read(self.conf_file)\n self.bot_token = config.get(\"bot\", \"token\")\n\n self.pfgo_url = config.get(\"pfgo\", \"url\")\n self.username = config.get(\"pfgo\", \"username\")\n self.password = config.get(\"pfgo\", \"password\")\n self.hide += config.get(\"pfgo\", \"hide\").split(\",\")\n\n self.webhook_url = config.get(\"webhook\", \"webhook_url\")\n self.webhook_port = config.get(\"webhook\", \"webhook_port\")\n self.running_host = config.get(\"webhook\", \"running_host\")\n self.running_port = int(config.get(\"webhook\", \"running_port\"))"
},
{
"identifier": "Logmanager",
"path": "utils/log.py",
"snippet": "class Logmanager(metaclass=SingletonMeta):\n log_list = []\n log_list_lock = threading.Lock()\n path = \"./\"\n\n def __init__(self, path: str) -> None:\n Logmanager.path = path\n\n @classmethod\n def create_logger(cls, name=None):\n if name is None:\n name = \"default\"\n logger = logging.getLogger(name)\n if name not in cls.log_list:\n with Logmanager.log_list_lock:\n if name not in cls.log_list:\n cls.log_list.append(name)\n logger.setLevel(logging.INFO)\n logfile = f\"{Logmanager.path}/log.log\"\n fh = RotatingFileHandler(\n logfile,\n mode=\"a\",\n maxBytes=1024 * 1024 * 10,\n backupCount=2,\n encoding=\"utf-8\",\n )\n formatter = logging.Formatter(\n \"[%(name)s] [%(asctime)s] [%(levelname)s] %(message)s\",\n \"%Y%m%d-%H:%M:%S\",\n )\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n ch = logging.StreamHandler()\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n fh.close()\n ch.close()\n return logger"
}
] | import os
import argparse
from .model import Task
from ..log import Logmanager | 838 |
def is_file_exists(file_path) -> bool:
r = os.path.exists(file_path)
if not r:
LOGGER.error(f"文件{file_path}不存在")
return r
def create_folder_if_not_exists(folder_path):
if not folder_path:
return
if not os.path.exists(folder_path):
os.makedirs(folder_path)
def parse_command_line_args():
"""
-c --config: 配置文件
--log: 日志存放位置
"""
parser = argparse.ArgumentParser(description="运行参数")
parser.add_argument("--config", "-c", type=str, default="./config.ini", help="配置文件")
parser.add_argument("--log", type=str, default="./", help="日志存放文件夹的位置,默认放到当前路径")
args = parser.parse_args()
# 初始化日志模块
global LOGGER
create_folder_if_not_exists(args.log)
|
def is_file_exists(file_path) -> bool:
r = os.path.exists(file_path)
if not r:
LOGGER.error(f"文件{file_path}不存在")
return r
def create_folder_if_not_exists(folder_path):
if not folder_path:
return
if not os.path.exists(folder_path):
os.makedirs(folder_path)
def parse_command_line_args():
"""
    -c --config: config file
    --log: directory for log files
"""
    parser = argparse.ArgumentParser(description="runtime arguments")
    parser.add_argument("--config", "-c", type=str, default="./config.ini", help="config file")
    parser.add_argument("--log", type=str, default="./", help="directory for log files; defaults to the current path")
args = parser.parse_args()
    # initialize the logging module
global LOGGER
create_folder_if_not_exists(args.log) | Logmanager(args.log) | 1 | 2023-12-28 08:55:04+00:00 | 2k |
shibing624/chatgpt-webui | src/index_func.py | [
{
"identifier": "local_embedding",
"path": "src/config.py",
"snippet": "def retrieve_openai_api(api_key=None):\ndef retrieve_proxy(proxy=None):\ndef update_doc_config(two_column_pdf):"
},
{
"identifier": "OPENAI_API_BASE",
"path": "src/presets.py",
"snippet": "OPENAI_API_BASE = \"https://api.openai.com/v1\""
},
{
"identifier": "excel_to_string",
"path": "src/utils.py",
"snippet": "def excel_to_string(file_path):\n # 读取Excel文件中的所有工作表\n excel_file = pd.read_excel(file_path, engine=\"openpyxl\", sheet_name=None)\n\n # 初始化结果字符串\n result = []\n\n # 遍历每一个工作表\n for sheet_name, sheet_data in excel_file.items():\n # 处理当前工作表并添加到结果字符串\n result += sheet_to_string(sheet_data, sheet_name=sheet_name)\n\n return result"
},
{
"identifier": "get_files_hash",
"path": "src/utils.py",
"snippet": "def get_files_hash(file_src=None, file_paths=None):\n if file_src:\n file_paths = [x.name for x in file_src]\n file_paths.sort(key=lambda x: os.path.basename(x))\n\n md5_hash = hashlib.md5()\n for file_path in file_paths:\n with open(file_path, \"rb\") as f:\n while chunk := f.read(8192):\n md5_hash.update(chunk)\n\n return md5_hash.hexdigest()"
},
{
"identifier": "load_pkl",
"path": "src/utils.py",
"snippet": "def load_pkl(file_path):\n with open(file_path, 'rb') as f:\n data = pickle.load(f)\n return data"
},
{
"identifier": "save_pkl",
"path": "src/utils.py",
"snippet": "def save_pkl(data, file_path):\n with open(file_path, 'wb') as f:\n pickle.dump(data, f)"
}
] | import os
import re
import PyPDF2
from typing import List, Optional, Any
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from loguru import logger
from tqdm import tqdm
from src.config import local_embedding, retrieve_proxy, chunk_overlap, chunk_size, hf_emb_model_name
from src.presets import OPENAI_API_BASE
from src.utils import excel_to_string, get_files_hash, load_pkl, save_pkl
from src.pdf_func import parse_pdf
from src.config import advance_docs
from langchain.document_loaders import UnstructuredWordDocumentLoader
from langchain.document_loaders import UnstructuredPowerPointLoader
from langchain.document_loaders import UnstructuredEPubLoader
from langchain.document_loaders import TextLoader
from langchain.vectorstores import FAISS
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.embeddings import OpenAIEmbeddings | 1,337 |
pwd_path = os.path.abspath(os.path.dirname(__file__))
class ChineseRecursiveTextSplitter(RecursiveCharacterTextSplitter):
"""Recursive text splitter for Chinese text.
copy from: https://github.com/chatchat-space/Langchain-Chatchat/tree/master
"""
def __init__(
self,
separators: Optional[List[str]] = None,
keep_separator: bool = True,
is_separator_regex: bool = True,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(keep_separator=keep_separator, **kwargs)
self._separators = separators or [
"\n\n",
"\n",
"。|!|?",
"\.\s|\!\s|\?\s",
";|;\s",
",|,\s"
]
self._is_separator_regex = is_separator_regex
@staticmethod
def _split_text_with_regex_from_end(
text: str, separator: str, keep_separator: bool
) -> List[str]:
# Now that we have the separator, split the text
if separator:
if keep_separator:
# The parentheses in the pattern keep the delimiters in the result.
_splits = re.split(f"({separator})", text)
splits = ["".join(i) for i in zip(_splits[0::2], _splits[1::2])]
if len(_splits) % 2 == 1:
splits += _splits[-1:]
else:
splits = re.split(separator, text)
else:
splits = list(text)
return [s for s in splits if s != ""]
def _split_text(self, text: str, separators: List[str]) -> List[str]:
"""Split incoming text and return chunks."""
final_chunks = []
# Get appropriate separator to use
separator = separators[-1]
new_separators = []
for i, _s in enumerate(separators):
_separator = _s if self._is_separator_regex else re.escape(_s)
if _s == "":
separator = _s
break
if re.search(_separator, text):
separator = _s
new_separators = separators[i + 1:]
break
_separator = separator if self._is_separator_regex else re.escape(separator)
splits = self._split_text_with_regex_from_end(text, _separator, self._keep_separator)
# Now go merging things, recursively splitting longer texts.
_good_splits = []
_separator = "" if self._keep_separator else separator
for s in splits:
if self._length_function(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
_good_splits = []
if not new_separators:
final_chunks.append(s)
else:
other_info = self._split_text(s, new_separators)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
return [re.sub(r"\n{2,}", "\n", chunk.strip()) for chunk in final_chunks if chunk.strip() != ""]
def get_documents(file_paths):
|
pwd_path = os.path.abspath(os.path.dirname(__file__))
class ChineseRecursiveTextSplitter(RecursiveCharacterTextSplitter):
"""Recursive text splitter for Chinese text.
copy from: https://github.com/chatchat-space/Langchain-Chatchat/tree/master
"""
def __init__(
self,
separators: Optional[List[str]] = None,
keep_separator: bool = True,
is_separator_regex: bool = True,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(keep_separator=keep_separator, **kwargs)
self._separators = separators or [
"\n\n",
"\n",
"。|!|?",
"\.\s|\!\s|\?\s",
";|;\s",
",|,\s"
]
self._is_separator_regex = is_separator_regex
@staticmethod
def _split_text_with_regex_from_end(
text: str, separator: str, keep_separator: bool
) -> List[str]:
# Now that we have the separator, split the text
if separator:
if keep_separator:
# The parentheses in the pattern keep the delimiters in the result.
_splits = re.split(f"({separator})", text)
splits = ["".join(i) for i in zip(_splits[0::2], _splits[1::2])]
if len(_splits) % 2 == 1:
splits += _splits[-1:]
else:
splits = re.split(separator, text)
else:
splits = list(text)
return [s for s in splits if s != ""]
def _split_text(self, text: str, separators: List[str]) -> List[str]:
"""Split incoming text and return chunks."""
final_chunks = []
# Get appropriate separator to use
separator = separators[-1]
new_separators = []
for i, _s in enumerate(separators):
_separator = _s if self._is_separator_regex else re.escape(_s)
if _s == "":
separator = _s
break
if re.search(_separator, text):
separator = _s
new_separators = separators[i + 1:]
break
_separator = separator if self._is_separator_regex else re.escape(separator)
splits = self._split_text_with_regex_from_end(text, _separator, self._keep_separator)
# Now go merging things, recursively splitting longer texts.
_good_splits = []
_separator = "" if self._keep_separator else separator
for s in splits:
if self._length_function(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
_good_splits = []
if not new_separators:
final_chunks.append(s)
else:
other_info = self._split_text(s, new_separators)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
return [re.sub(r"\n{2,}", "\n", chunk.strip()) for chunk in final_chunks if chunk.strip() != ""]
def get_documents(file_paths): | text_splitter = ChineseRecursiveTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap) | 0 | 2023-12-27 12:14:26+00:00 | 2k |
ConnectAI-E/GitMaya | server/tasks/lark/pull_request.py | [
{
"identifier": "get_bot_by_application_id",
"path": "server/tasks/lark/base.py",
"snippet": "def get_bot_by_application_id(app_id):\n application = (\n db.session.query(IMApplication)\n .filter(\n or_(\n IMApplication.app_id == app_id,\n IMApplication.id == app_id,\n )\n )\n .first()\n )\n if application:\n return (\n Bot(\n app_id=application.app_id,\n app_secret=application.app_secret,\n ),\n application,\n )\n return None, None"
},
{
"identifier": "get_git_object_by_message_id",
"path": "server/tasks/lark/base.py",
"snippet": "def get_git_object_by_message_id(message_id):\n \"\"\"\n 根据message_id区分Repo、Issue、PullRequest对象\n\n 参数:\n message_id:消息ID\n\n 返回值:\n repo:Repo对象,如果存在\n issue:Issue对象,如果存在\n pr:PullRequest对象,如果存在\n \"\"\"\n issue = (\n db.session.query(Issue)\n .filter(\n Issue.message_id == message_id,\n )\n .first()\n )\n if issue:\n return None, issue, None\n pr = (\n db.session.query(PullRequest)\n .filter(\n PullRequest.message_id == message_id,\n )\n .first()\n )\n if pr:\n return None, None, pr\n repo = (\n db.session.query(Repo)\n .filter(\n Repo.message_id == message_id,\n )\n .first()\n )\n if repo:\n return repo, None, None\n\n return None, None, None"
},
{
"identifier": "with_authenticated_github",
"path": "server/tasks/lark/base.py",
"snippet": "def with_authenticated_github():\n def decorate(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"\n 1. 这个装饰器用来统一处理错误消息\n 2. github rest api调用出错的时候抛出异常\n 3. 这个装饰器捕获特定的异常,给操作者特定的报错消息\n \"\"\"\n try:\n return func(*args, **kwargs)\n except GitHubPermissionError as e:\n try:\n from .manage import send_manage_fail_message\n\n app_id, message_id, content, raw_message = args[-4:]\n host = os.environ.get(\"DOMAIN\")\n send_manage_fail_message(\n f\"[请点击绑定 GitHub 账号后重试]({host}/api/github/oauth)\",\n app_id,\n message_id,\n content,\n raw_message,\n )\n except Exception as e:\n logging.error(e)\n except Exception as e:\n raise e\n\n return wrapper\n\n return decorate"
}
] | import json
import logging
from celery_app import app, celery
from connectai.lark.sdk import FeishuTextMessage
from model.schema import (
ChatGroup,
CodeApplication,
CodeUser,
IMUser,
PullRequest,
Repo,
Team,
TeamMember,
db,
)
from model.team import get_assignees_by_openid
from utils.github.repo import GitHubAppRepo
from utils.lark.pr_card import PullCard
from utils.lark.pr_manual import (
PrManual,
PullRequestDiff,
PullRequestLog,
PullRequestView,
)
from utils.lark.pr_tip_failed import PrTipFailed
from utils.lark.pr_tip_success import PrTipSuccess
from .base import (
get_bot_by_application_id,
get_git_object_by_message_id,
with_authenticated_github,
) | 930 |
@celery.task()
def send_pull_request_failed_tip(
content, app_id, message_id, *args, bot=None, **kwargs
):
"""send new card message to user.
Args:
app_id: IMApplication.app_id.
message_id: lark message id.
content: error message
"""
if not bot:
|
@celery.task()
def send_pull_request_failed_tip(
content, app_id, message_id, *args, bot=None, **kwargs
):
"""send new card message to user.
Args:
app_id: IMApplication.app_id.
message_id: lark message id.
content: error message
"""
if not bot: | bot, _ = get_bot_by_application_id(app_id) | 0 | 2023-12-22 02:43:21+00:00 | 2k |
camenduru/AnyDoor-online-hf | dinov2/dinov2/layers/block.py | [
{
"identifier": "Attention",
"path": "dinov2/dinov2/layers/attention.py",
"snippet": "class Attention(nn.Module):\n def __init__(\n self,\n dim: int,\n num_heads: int = 8,\n qkv_bias: bool = False,\n proj_bias: bool = True,\n attn_drop: float = 0.0,\n proj_drop: float = 0.0,\n ) -> None:\n super().__init__()\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = head_dim**-0.5\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim, bias=proj_bias)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x: Tensor) -> Tensor:\n B, N, C = x.shape\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n\n q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]\n attn = q @ k.transpose(-2, -1)\n\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x"
},
{
"identifier": "MemEffAttention",
"path": "dinov2/dinov2/layers/attention.py",
"snippet": "class MemEffAttention(Attention):\n def forward(self, x: Tensor, attn_bias=None) -> Tensor:\n if not XFORMERS_AVAILABLE:\n assert attn_bias is None, \"xFormers is required for nested tensors usage\"\n return super().forward(x)\n\n B, N, C = x.shape\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)\n\n q, k, v = unbind(qkv, 2)\n\n if attn_bias is not None:\n self_att_op = fmha.MemoryEfficientAttentionFlashAttentionOp\n else:\n self_att_op = None\n x = memory_efficient_attention(q, k, v, attn_bias=attn_bias, op=self_att_op)\n x = x.reshape([B, N, C])\n\n x = self.proj(x)\n x = self.proj_drop(x)\n return x"
},
{
"identifier": "DropPath",
"path": "dinov2/dinov2/layers/drop_path.py",
"snippet": "class DropPath(nn.Module):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\"\"\"\n\n def __init__(self, drop_prob=None):\n super(DropPath, self).__init__()\n self.drop_prob = drop_prob\n\n def forward(self, x):\n return drop_path(x, self.drop_prob, self.training)"
},
{
"identifier": "LayerScale",
"path": "dinov2/dinov2/layers/layer_scale.py",
"snippet": "class LayerScale(nn.Module):\n def __init__(\n self,\n dim: int,\n init_values: Union[float, Tensor] = 1e-5,\n inplace: bool = False,\n ) -> None:\n super().__init__()\n self.inplace = inplace\n self.gamma = nn.Parameter(init_values * torch.ones(dim))\n\n def forward(self, x: Tensor) -> Tensor:\n return x.mul_(self.gamma) if self.inplace else x * self.gamma"
},
{
"identifier": "Mlp",
"path": "dinov2/dinov2/layers/mlp.py",
"snippet": "class Mlp(nn.Module):\n def __init__(\n self,\n in_features: int,\n hidden_features: Optional[int] = None,\n out_features: Optional[int] = None,\n act_layer: Callable[..., nn.Module] = nn.GELU,\n drop: float = 0.0,\n bias: bool = True,\n ) -> None:\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features, bias=bias)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features, bias=bias)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x: Tensor) -> Tensor:\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x"
}
] | import logging
import torch
from typing import Callable, List, Any, Tuple, Dict
from torch import nn, Tensor
from .attention import Attention, MemEffAttention
from .drop_path import DropPath
from .layer_scale import LayerScale
from .mlp import Mlp
from xformers.ops import fmha
from xformers.ops import scaled_index_add, index_select_cat | 1,475 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# References:
# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py
logger = logging.getLogger("dinov2")
try:
XFORMERS_AVAILABLE = True
except ImportError:
logger.warning("xFormers not available")
XFORMERS_AVAILABLE = False
class Block(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
mlp_ratio: float = 4.0,
qkv_bias: bool = False,
proj_bias: bool = True,
ffn_bias: bool = True,
drop: float = 0.0,
attn_drop: float = 0.0,
init_values=None,
drop_path: float = 0.0,
act_layer: Callable[..., nn.Module] = nn.GELU,
norm_layer: Callable[..., nn.Module] = nn.LayerNorm,
attn_class: Callable[..., nn.Module] = Attention,
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# References:
# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py
logger = logging.getLogger("dinov2")
try:
XFORMERS_AVAILABLE = True
except ImportError:
logger.warning("xFormers not available")
XFORMERS_AVAILABLE = False
class Block(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
mlp_ratio: float = 4.0,
qkv_bias: bool = False,
proj_bias: bool = True,
ffn_bias: bool = True,
drop: float = 0.0,
attn_drop: float = 0.0,
init_values=None,
drop_path: float = 0.0,
act_layer: Callable[..., nn.Module] = nn.GELU,
norm_layer: Callable[..., nn.Module] = nn.LayerNorm,
attn_class: Callable[..., nn.Module] = Attention, | ffn_layer: Callable[..., nn.Module] = Mlp, | 4 | 2023-12-25 04:48:34+00:00 | 2k |
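Because the Attention snippet in this record's context is self-contained, its shape behaviour is easy to check. A minimal sketch, assuming the dinov2 package is importable; the dimensions are arbitrary:

import torch
from dinov2.dinov2.layers.attention import Attention

attn = Attention(dim=64, num_heads=8)
x = torch.randn(2, 16, 64)   # (batch, tokens, dim)
out = attn(x)                # self-attention preserves the shape: (2, 16, 64)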
OmchainFoundation/evm-indexer | tests/test_range.py | [
{
"identifier": "Fetcher",
"path": "evm_indexer/fetcher.py",
"snippet": "class Fetcher:\n def __init__(self, node_endpoint, is_poa=True):\n self.web3 = Web3(Web3.HTTPProvider(node_endpoint))\n if is_poa:\n self.web3.middleware_onion.inject(geth_poa_middleware, layer=0)\n \n if not self.web3.is_connected():\n raise ConnectionError('Could not connect to node at {}'.format(node_endpoint))\n \n def fetch_block(self, block_number):\n try:\n return self.web3.eth.get_block(block_number, full_transactions=True)\n except Exception as e:\n return None\n \n def fetch_latest_block_number(self):\n return self.web3.eth.block_number\n \n def fetch_blocks_in_range(self, start_block, end_block):\n blocks = []\n for block_number in range(start_block, end_block + 1):\n block = self.fetch_block(block_number)\n if block:\n blocks.append(block)\n return blocks\n \n def fetch_transactions_in_block(self, block_number):\n block = self.fetch_block(block_number)\n if block:\n return block['transactions']\n else:\n return None\n \n def fetch_transactions_in_range(self, start_block, end_block):\n transactions = []\n for block_number in range(start_block, end_block + 1):\n print('Fetching block {}'.format(block_number))\n block_transactions = self.fetch_transactions_in_block(block_number)\n if block_transactions:\n transactions.extend(block_transactions)\n return transactions"
},
{
"identifier": "Decoder",
"path": "evm_indexer/decoder.py",
"snippet": "class Decoder:\n def __init__(self, fetcher):\n self.fetcher = fetcher\n self.web3 = fetcher.web3\n \n def get_erc20_transfers_from_tx(self, tx_receipt):\n # Filter the logs for ERC20 Transfer events\n transfer_events = []\n for log in tx_receipt['logs']:\n if log['topics'][0] == ERC20_TRANSFER_EVENT_SIGNATURE_HASH and len(log['topics']) == 3:\n try:\n from_address = self.web3.to_checksum_address('0x' + log['topics'][1][-40:])\n to_address = self.web3.to_checksum_address('0x' + log['topics'][2][-40:])\n token_address = log['address']\n amount = Web3.to_int(hexstr=log['data'])\n\n transfer_events.append({\n 'from': from_address,\n 'to': to_address,\n 'amount': amount,\n 'token_address': token_address\n })\n except BadFunctionCallOutput:\n # Handle error if the log decoding fails\n continue\n return transfer_events\n \n def get_native_transfers_from_tx(self, tx_hash):\n tx = self.web3.eth.get_transaction(tx_hash)\n value = tx['value']\n if value == 0:\n return []\n \n from_address = self.web3.to_checksum_address(tx['from'])\n to_address = self.web3.to_checksum_address(tx['to'])\n return [{\n 'from': from_address,\n 'to': to_address,\n 'amount': value,\n 'token_address': None\n }]"
},
{
"identifier": "InternalTracer",
"path": "evm_indexer/internal_tracer.py",
"snippet": "class InternalTracer:\n def __init__(self, node_endpoint):\n self.node_endpoint = node_endpoint\n \n def get_tx_receipt(self, tx_hash):\n try:\n \n if type(tx_hash) != str:\n tx_hash = Web3.to_hex(tx_hash)\n \n headers = {'Content-Type': 'application/json'}\n payload = {\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"method\": \"eth_getTransactionReceipt\",\n \"params\": [tx_hash]\n }\n\n response = requests.post(self.node_endpoint, headers=headers, data=json.dumps(payload))\n if response.status_code == 200:\n return response.json()\n else:\n return None\n except Exception as e:\n return None\n \n def get_trace(self, tx_hash):\n try:\n headers = {'Content-Type': 'application/json'}\n payload = {\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"method\": \"debug_traceTransaction\",\n \"params\": [\n tx_hash,\n ]\n }\n\n response = requests.post(self.node_endpoint, headers=headers, data=json.dumps(payload))\n if response.status_code == 200:\n return response.json()\n else:\n return None\n except Exception as e:\n return None\n \n def capture_internal_calls(self, trace_response, tx_receipt):\n captured_calls = []\n struct_logs = trace_response['result']['structLogs']\n\n # Initial call from EOA to the contract\n initiator_address = tx_receipt['from']\n contract_address = tx_receipt['to'] # Contract being called\n current_call = {'from': initiator_address, 'to': contract_address}\n\n for log in struct_logs:\n op = log['op']\n stack = log['stack']\n\n if op in ['CALL', 'CALLCODE', 'DELEGATECALL', 'STATICCALL']:\n if len(stack) >= 7:\n # Extract 'to' address and value from the stack\n to_address = '0x' + stack[-2][-40:]\n value = int(stack[-3], 16) if op == 'CALL' else 0 # Value is relevant only for CALL\n\n captured_call = {'op': op, 'from': current_call['to'], 'to': to_address, 'value': value}\n captured_calls.append(captured_call)\n\n # Update the current call context\n current_call['from'] = current_call['to']\n current_call['to'] = to_address\n\n return captured_calls\n \n def calculate_net_changes(captured_calls):\n net_changes = {}\n for call in captured_calls:\n if call['from'] not in net_changes:\n net_changes[call['from']] = 0\n if call['to'] not in net_changes:\n net_changes[call['to']] = 0\n\n net_changes[call['from']] -= call['value']\n net_changes[call['to']] += call['value']\n\n return net_changes"
}
] | import sys
import os
from evm_indexer.fetcher import Fetcher
from evm_indexer.decoder import Decoder
from evm_indexer.internal_tracer import InternalTracer
from web3 import Web3 | 1,584 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
NODE_URL = 'https://seed.omchain.io'
fetcher = Fetcher(NODE_URL, is_poa=True)
| sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
NODE_URL = 'https://seed.omchain.io'
fetcher = Fetcher(NODE_URL, is_poa=True) | decoder = Decoder(fetcher=fetcher) | 1 | 2023-12-26 17:39:42+00:00 | 2k |
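Continuing the test setup above, the Decoder snippet pulls ERC20 transfers out of a transaction receipt. A minimal sketch; the transaction hash is a placeholder, and fetching the receipt through fetcher.web3 is an assumption about how the test would proceed:

tx_hash = "0x" + "00" * 32                                   # placeholder hash, not a real tx
receipt = fetcher.web3.eth.get_transaction_receipt(tx_hash)  # web3.py v6 snake_case API
for transfer in decoder.get_erc20_transfers_from_tx(receipt):
    print(transfer["from"], "->", transfer["to"], transfer["amount"], transfer["token_address"])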
omkarcloud/google-scraper | src/google_scraper.py | [
{
"identifier": "write_output",
"path": "src/write_output.py",
"snippet": "def write_output(query, data, entity_type,transformer = kebab_case):\n\n query_kebab = transformer(query)\n make_folders(query_kebab)\n\n csv_path = f\"output/{query_kebab}/csv/\" \n json_path = f\"output/{query_kebab}/json/\"\n\n create(data,[], csv_path, json_path, query_kebab,entity_type)"
},
{
"identifier": "FAILED_DUE_TO_CREDITS_EXHAUSTED",
"path": "src/search.py",
"snippet": "FAILED_DUE_TO_CREDITS_EXHAUSTED = \"FAILED_DUE_TO_CREDITS_EXHAUSTED\""
},
{
"identifier": "FAILED_DUE_TO_NO_KEY",
"path": "src/search.py",
"snippet": "FAILED_DUE_TO_NO_KEY = \"FAILED_DUE_TO_NO_KEY\""
},
{
"identifier": "FAILED_DUE_TO_NOT_SUBSCRIBED",
"path": "src/search.py",
"snippet": "FAILED_DUE_TO_NOT_SUBSCRIBED = \"FAILED_DUE_TO_NOT_SUBSCRIBED\""
},
{
"identifier": "FAILED_DUE_TO_UNKNOWN_ERROR",
"path": "src/search.py",
"snippet": "FAILED_DUE_TO_UNKNOWN_ERROR = \"FAILED_DUE_TO_UNKNOWN_ERROR\""
},
{
"identifier": "search",
"path": "src/search.py",
"snippet": "@request(**default_request_options)\ndef search(_, data, metadata):\n if not metadata.get('key'):\n return DontCache({\n \"data\": None,\n \"error\":FAILED_DUE_TO_NO_KEY\n })\n max_items = data['max']\n url = \"https://google-scraper.p.rapidapi.com/search/\"\n qp = {\"query\": data['query']}\n params = {**qp, 'link':cl.join_link(url, query_params=qp)}\n\n request_data = {**metadata, \"params\": params}\n result = do_request(request_data)\n initial_results = cl.select(result, 'data', 'results', default=[])\n \n if not cl.select(result, 'error'):\n more_results = cl.select(result, 'data', 'results', default=[])\n print(f\"Got {len(more_results)} more results\")\n\n while cl.select(result, 'data', 'next') and (max_items is None or len(initial_results) < max_items):\n next = cl.select(result, 'data', 'next')\n\n params = {**qp, 'link':next}\n request_data = {**metadata, \"params\": params}\n result = do_request(request_data)\n if result.get('error'):\n break\n more_results = cl.select(result, 'data', 'results', default=[])\n print(f\"Got {len(more_results)} more results\")\n initial_results.extend(more_results)\n\n\n if cl.select(result, 'error'):\n return DontCache(result)\n else: \n if max_items is not None:\n initial_results = initial_results[:max_items]\n\n result['data']['results'] = initial_results\n return result"
}
] | from typing import List,Optional, Union, Dict
from botasaurus import bt
from .write_output import write_output
from .search import FAILED_DUE_TO_CREDITS_EXHAUSTED, FAILED_DUE_TO_NO_KEY,FAILED_DUE_TO_NOT_SUBSCRIBED, FAILED_DUE_TO_UNKNOWN_ERROR, search | 1,171 |
def clean_data(social_details):
success, credits_exhausted, not_subscribed, unknown_error, no_key = [], [], [], [], []
for detail in social_details:
if detail.get("error") is None:
success.append(detail)
elif detail["error"] == FAILED_DUE_TO_CREDITS_EXHAUSTED:
credits_exhausted.append(detail)
elif detail["error"] == FAILED_DUE_TO_NOT_SUBSCRIBED:
not_subscribed.append(detail)
elif detail["error"] == FAILED_DUE_TO_UNKNOWN_ERROR:
unknown_error.append(detail)
elif detail["error"] == FAILED_DUE_TO_NO_KEY:
no_key.append(detail)
return success, credits_exhausted, not_subscribed, unknown_error, no_key
def print_data_errors(credits_exhausted, not_subscribed, unknown_error, no_key):
if credits_exhausted:
name = "queries" if len(credits_exhausted) > 1 else "query"
print(f"Could not get data for {len(credits_exhausted)} {name} due to credit exhaustion. Please consider upgrading your plan by visiting https://rapidapi.com/Chetan11dev/api/google-scraper/pricing to continue scraping data.")
if not_subscribed:
name = "queries" if len(not_subscribed) > 1 else "query"
print(f"Could not get data for {len(not_subscribed)} {name} as you are not subscribed to Google Scraper API. Please subscribe to a free plan by visiting https://rapidapi.com/Chetan11dev/api/google-scraper/pricing")
if unknown_error:
name = "queries" if len(unknown_error) > 1 else "query"
print(f"Could not get data for {len(unknown_error)} {name} due to Unknown Error.")
if no_key:
name = "queries" if len(no_key) > 1 else "query"
print(f"Could not get data for {len(no_key)} {name} as you are not subscribed to Google Scraper API. Please subscribe to a free plan by visiting https://rapidapi.com/Chetan11dev/api/google-scraper/pricing")
class Google:
@staticmethod
|
def clean_data(social_details):
success, credits_exhausted, not_subscribed, unknown_error, no_key = [], [], [], [], []
for detail in social_details:
if detail.get("error") is None:
success.append(detail)
elif detail["error"] == FAILED_DUE_TO_CREDITS_EXHAUSTED:
credits_exhausted.append(detail)
elif detail["error"] == FAILED_DUE_TO_NOT_SUBSCRIBED:
not_subscribed.append(detail)
elif detail["error"] == FAILED_DUE_TO_UNKNOWN_ERROR:
unknown_error.append(detail)
elif detail["error"] == FAILED_DUE_TO_NO_KEY:
no_key.append(detail)
return success, credits_exhausted, not_subscribed, unknown_error, no_key
def print_data_errors(credits_exhausted, not_subscribed, unknown_error, no_key):
if credits_exhausted:
name = "queries" if len(credits_exhausted) > 1 else "query"
print(f"Could not get data for {len(credits_exhausted)} {name} due to credit exhaustion. Please consider upgrading your plan by visiting https://rapidapi.com/Chetan11dev/api/google-scraper/pricing to continue scraping data.")
if not_subscribed:
name = "queries" if len(not_subscribed) > 1 else "query"
print(f"Could not get data for {len(not_subscribed)} {name} as you are not subscribed to Google Scraper API. Please subscribe to a free plan by visiting https://rapidapi.com/Chetan11dev/api/google-scraper/pricing")
if unknown_error:
name = "queries" if len(unknown_error) > 1 else "query"
print(f"Could not get data for {len(unknown_error)} {name} due to Unknown Error.")
if no_key:
name = "queries" if len(no_key) > 1 else "query"
print(f"Could not get data for {len(no_key)} {name} as you are not subscribed to Google Scraper API. Please subscribe to a free plan by visiting https://rapidapi.com/Chetan11dev/api/google-scraper/pricing")
class Google:
@staticmethod | def search(query: Union[str, List[str]], max: Optional[int] = None, key: Optional[str] =None, use_cache: bool = True) -> Dict: | 5 | 2023-12-30 08:14:05+00:00 | 2k |
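Since clean_data and print_data_errors above route each result purely by its error field, a smoke test only needs dummy records. A minimal sketch; both result dicts are fabricated placeholders for illustration:

dummy_results = [
    {"query": "q1", "error": None, "data": {"results": []}},  # dummy success
    {"query": "q2", "error": FAILED_DUE_TO_NO_KEY},           # dummy failure
]
success, exhausted, not_subscribed, unknown, no_key = clean_data(dummy_results)
print_data_errors(exhausted, not_subscribed, unknown, no_key)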
AI2lab/comfyUI-tool-2lab | nodes/tool/preview.py | [
{
"identifier": "downloadFileToTempFolder",
"path": "nodes/common/utils.py",
"snippet": "def downloadFileToTempFolder(url: str) -> str:\n try:\n response = requests.get(url)\n response.raise_for_status()\n\n try:\n if not os.path.exists(temp_folder):\n os.makedirs(temp_folder)\n except Exception as e:\n print(f\"Fail to create directory '{temp_folder}. Error: {e}'\")\n return None\n\n # temp file name\n ext = getFileNameExt(url)\n curtime = str(int(time.time()))\n filename = curtime\n if curtime != \"\":\n filename = curtime+\".\"+ext\n file_path = os.path.join(temp_folder,filename)\n except:\n return ''\n return file_path"
},
{
"identifier": "get_project_name",
"path": "nodes/constants.py",
"snippet": "def get_project_name(name):\n return '{} ({})'.format(name, PROJECT_NAME)"
},
{
"identifier": "get_project_category",
"path": "nodes/constants.py",
"snippet": "def get_project_category(sub_dirs = None):\n start = \"🦊\" + PROJECT_NAME\n if sub_dirs is None:\n return start\n else:\n return \"{}/{}\".format(start,sub_dirs)"
}
] | import numpy as np
import torch
from PIL import Image
from ..common.utils import downloadFileToTempFolder
from ..constants import get_project_name, get_project_category | 812 |
NODE_CATEGORY = get_project_category("util/preview")
class ShowText:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"string": ("STRING", {"forceInput": True}),
},
"hidden": {
"unique_id": "UNIQUE_ID",
"extra_pnginfo": "EXTRA_PNGINFO",},
}
NAME = get_project_name('show_text')
CATEGORY = NODE_CATEGORY
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("string",)
OUTPUT_NODE = True
FUNCTION = "doWork"
def doWork(self, string, unique_id=None, extra_pnginfo=None):
return {"ui": {"string": [string, ]}, "result": (string,)}
class ShowWebImage:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"image_url": ("STRING", {"multiline": False}),
"RGBA": (["false", "true"],{"default":False}),
},
}
NAME = get_project_name('show_web_image')
CATEGORY = NODE_CATEGORY
RETURN_TYPES = ("IMAGE", "MASK","TEXT","filePath")
RETURN_NAMES = ("image", "mask","image_url","filePath")
OUTPUT_NODE = True
FUNCTION = "doWork"
def doWork(self, image_url, RGBA):
print(image_url)
i = None
file_path = ''
try:
if image_url.startswith('http'):
file_path,i = self.download_image(image_url)
else:
file_path = image_url
i = Image.open(image_url)
if not i:
return
image = i
            if RGBA == "false":  # combo inputs arrive as strings, so "false" must be compared explicitly
image = image.convert('RGB')
image = np.array(image).astype(np.float32) / 255.0
image = torch.from_numpy(image)[None,]
# RGBA - mask
if 'A' in i.getbands():
mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
mask = 1. - torch.from_numpy(mask)
else:
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
return (image, mask, image_url,file_path)
except :
pass
return (None, None, image_url,file_path)
def download_image(self, url):
|
NODE_CATEGORY = get_project_category("util/preview")
class ShowText:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"string": ("STRING", {"forceInput": True}),
},
"hidden": {
"unique_id": "UNIQUE_ID",
"extra_pnginfo": "EXTRA_PNGINFO",},
}
NAME = get_project_name('show_text')
CATEGORY = NODE_CATEGORY
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("string",)
OUTPUT_NODE = True
FUNCTION = "doWork"
def doWork(self, string, unique_id=None, extra_pnginfo=None):
return {"ui": {"string": [string, ]}, "result": (string,)}
class ShowWebImage:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"image_url": ("STRING", {"multiline": False}),
"RGBA": (["false", "true"],{"default":False}),
},
}
NAME = get_project_name('show_web_image')
CATEGORY = NODE_CATEGORY
RETURN_TYPES = ("IMAGE", "MASK","TEXT","filePath")
RETURN_NAMES = ("image", "mask","image_url","filePath")
OUTPUT_NODE = True
FUNCTION = "doWork"
def doWork(self, image_url, RGBA):
print(image_url)
i = None
file_path = ''
try:
if image_url.startswith('http'):
file_path,i = self.download_image(image_url)
else:
file_path = image_url
i = Image.open(image_url)
if not i:
return
image = i
            if RGBA == "false":  # combo inputs arrive as strings, so "false" must be compared explicitly
image = image.convert('RGB')
image = np.array(image).astype(np.float32) / 255.0
image = torch.from_numpy(image)[None,]
# RGBA - mask
if 'A' in i.getbands():
mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
mask = 1. - torch.from_numpy(mask)
else:
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
return (image, mask, image_url,file_path)
except :
pass
return (None, None, image_url,file_path)
def download_image(self, url): | file_path = downloadFileToTempFolder(url) | 0 | 2023-12-24 14:44:13+00:00 | 2k |
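doWork above returns a four-tuple of (image, mask, image_url, file_path) and falls back to Nones when the download or decode fails. A minimal sketch outside ComfyUI's node graph; the URL is a placeholder, and RGBA is passed as a string because the combo input delivers strings:

node = ShowWebImage()
image, mask, url, path = node.doWork("https://example.com/sample.png", RGBA="false")
print("saved to", path, "| tensor:", None if image is None else image.shape)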
Amirtheahmed/ddd-cqrs-fastapi | src/contexts/photostore/photo/application/createone/PhotoCreator.py | [
{
"identifier": "PhotoRepository",
"path": "src/contexts/photostore/photo/domain/PhotoRepository.py",
"snippet": "class PhotoRepository(ABC):\n\n async def create_one(self, photo: Photo) -> NoReturn:\n raise NotImplementedError()"
},
{
"identifier": "Photo",
"path": "src/contexts/photostore/photo/domain/entities/Photo.py",
"snippet": "class Photo(AggregateRoot):\n\n def __init__(\n self,\n photo_id: PhotoId,\n name: PhotoName,\n user_id: UserId,\n file: PhotoFile,\n tags: PhotoTags,\n ):\n super().__init__()\n self.id = photo_id\n self.name = name\n self.user_id = user_id\n self.file = file\n self.tags = tags\n\n @staticmethod\n def create(photo_id: PhotoId, name: PhotoName, user_id: UserId, file: PhotoFile, tags: PhotoTags):\n photo = Photo(photo_id, name, user_id, file, tags)\n event = PhotoCreatedDomainEvent(photo.id.value(), photo_id, user_id, name, tags)\n photo.record_event(event)\n return photo\n\n @staticmethod\n def create_from_primitives(raw_data: Dict[str, Any]):\n photo = Photo(\n PhotoId(raw_data.get('id')),\n PhotoName(raw_data.get('name')),\n UserId(raw_data.get('user-id')),\n PhotoFile(raw_data.get('file')),\n PhotoTags([PhotoTag(tag) for tag in raw_data.get('tags', default=[])]),\n )\n return photo\n\n def to_primitives(self) -> Union[Dict, List]:\n return {\n 'id': self.id.value(),\n 'name': self.name.value(),\n 'user-id': self.user_id.value(),\n 'tags': self.tags.values(),\n }"
},
{
"identifier": "PhotoFile",
"path": "src/contexts/photostore/photo/domain/entities/PhotoFile.py",
"snippet": "class PhotoFile(ValueObject):\n\n def __init__(self, content: bytes):\n super().__init__(content)"
},
{
"identifier": "PhotoId",
"path": "src/contexts/photostore/photo/domain/entities/PhotoId.py",
"snippet": "class PhotoId(ValueObject):\n\n def __init__(self, value: str):\n super().__init__(value)\n if not Uuid.is_valid_uuid(value):\n raise ValueObjectValidationError(f'PhotoId must be UUID V4. <{value}> found.')"
},
{
"identifier": "PhotoName",
"path": "src/contexts/photostore/photo/domain/entities/PhotoName.py",
"snippet": "class PhotoName(ValueObject):\n\n def __init__(self, value: str):\n super().__init__(value)"
},
{
"identifier": "UserId",
"path": "src/contexts/photostore/photo/domain/entities/UserId.py",
"snippet": "class UserId(ValueObject):\n\n def __init__(self, value: str):\n super().__init__(value)"
},
{
"identifier": "EventBus",
"path": "src/contexts/shared/domain/EventBus.py",
"snippet": "class EventBus(Interface):\n\n @abstractmethod\n async def publish(self, events: List[DomainEvent]):\n raise NotImplementedError()\n\n @abstractmethod\n def add_subscribers(self, subscribers: List[EventSubscriber]):\n raise NotImplementedError()\n\n @abstractmethod\n def start(self):\n raise NotImplementedError()"
}
] | from src.contexts.photostore.photo.domain.PhotoRepository import PhotoRepository
from src.contexts.photostore.photo.domain.entities.Photo import Photo
from src.contexts.photostore.photo.domain.entities.PhotoFile import PhotoFile
from src.contexts.photostore.photo.domain.entities.PhotoId import PhotoId
from src.contexts.photostore.photo.domain.entities.PhotoName import PhotoName
from src.contexts.photostore.photo.domain.entities.UserId import UserId
from src.contexts.shared.domain.EventBus import EventBus | 891 |
class PhotoCreator:
def __init__(self, photo_repository: PhotoRepository, event_bus: EventBus):
self.__photo_repository = photo_repository
self.__event_bus = event_bus
|
class PhotoCreator:
def __init__(self, photo_repository: PhotoRepository, event_bus: EventBus):
self.__photo_repository = photo_repository
self.__event_bus = event_bus
| async def run(self, photo_id: PhotoId, name: PhotoName, user_id: UserId, file: PhotoFile): | 2 | 2023-12-27 13:58:25+00:00 | 2k |
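The Photo aggregate in this record's context records a PhotoCreatedDomainEvent when built through Photo.create, which is presumably what PhotoCreator.run persists via the repository and publishes via the event bus. A minimal sketch of constructing the aggregate; every value is a placeholder, and PhotoTags wrapping a plain list is an assumption based on the snippet:

import uuid

photo = Photo.create(
    PhotoId(str(uuid.uuid4())),   # PhotoId validates a UUID v4
    PhotoName("holiday.jpg"),
    UserId("user-123"),           # placeholder user id
    PhotoFile(b"\x89PNG\r\n"),    # placeholder image bytes
    PhotoTags([]),                # assumed: an empty tag list is valid
)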
JINO-ROHIT/RAG-with-Memory | vlite_db/main.py | [
{
"identifier": "EmbeddingModel",
"path": "vlite_db/model.py",
"snippet": "class EmbeddingModel:\n '''\n EmbeddingModel runs a transformer model and returns the embedding for a given text.\n '''\n def __init__(self, model_name='sentence-transformers/all-MiniLM-L6-v2'):\n self.tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) # use_fast=True\n\n self.model = AutoModel.from_pretrained(model_name)\n self.dimension = self.model.embeddings.position_embeddings.embedding_dim\n self.max_seq_length = self.model.embeddings.position_embeddings.num_embeddings\n \n\n #print(\"Tokenizer:\", self.tokenizer)\n # print(\"Dimension:\", self.dimension)\n # print(\"Max sequence length:\", self.max_seq_length)\n\n def embed(self, texts, max_seq_length=256, device=\"mps\"):\n\n if(torch.backends.mps.is_available()):\n dev = torch.device(\"mps\")\n else:\n dev = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n device = torch.device(dev) # Create a torch.device object\n print(\"Device:\", device)\n self.model.to(device) # Move the model to the specified device\n\n encoded_input = self.tokenizer(texts, padding=True, truncation=True, return_tensors='pt', max_length=max_seq_length)\n print(\"Encoded input done\",encoded_input['input_ids'].shape)\n \n if encoded_input['input_ids'].shape[0] > 1300:\n print(\"Encoded input too large, defaulting to CPU\")\n device = torch.device(\"cpu\")\n self.model.to(device) # Move the model to the specified device\n \n encoded_input = {name: tensor.to(device) for name, tensor in encoded_input.items()} # Move all input tensors to the specified device\n print(\"Encoded input moved to device\")\n \n with torch.no_grad():\n model_output = self.model(**encoded_input)\n\n embeddings = mean_pooling(model_output, encoded_input['attention_mask'], device=device)\n tensor_embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)\n np_embeddings = tensor_embeddings.cpu().numpy() # Move tensor to CPU before converting to numpy\n\n return np_embeddings\n\n def token_count(self, texts):\n tokens = 0\n for text in texts:\n tokens+=len(self.tokenizer.tokenize(text))"
},
{
"identifier": "chop_and_chunk",
"path": "vlite_db/utils.py",
"snippet": "def chop_and_chunk(text, max_seq_length=1024):\n \"\"\"\n Chop and chunk a text into smaller pieces of text. \n \n Args:\n text: string, list of strings, or array of strings \n max_seq_length: maximum length of the text\n \"\"\"\n \n chunks = []\n chunk = ''\n for tokens in text.split(' '):\n count = 0\n chunk += tokens + ' '\n if len(chunk) > max_seq_length:\n chunks.append(chunk)\n chunk = ''\n return chunks"
},
{
"identifier": "cos_sim",
"path": "vlite_db/utils.py",
"snippet": "def cos_sim(a, b):\n sims = a @ b.T\n sims /= np.linalg.norm(a) * np.linalg.norm(b, axis=1) \n return sims"
}
] | import numpy as np
import datetime
from uuid import uuid4
from .model import EmbeddingModel
from .utils import chop_and_chunk, cos_sim | 1,156 |
class VLite:
'''
vlite is a simple vector database that stores vectors in a numpy array.
'''
def __init__(self, collection=None,device='mps',model_name=None):
# Filename must be unique between runs. Saving to the same file will append vectors to previous run's vectors
if collection is None:
current_datetime = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
collection = f"vlite_{current_datetime}.npz"
self.collection = collection
self.device = device
self.model = EmbeddingModel() if model_name is None else EmbeddingModel(model_name)
try:
with np.load(self.collection, allow_pickle=True) as data:
self.texts = data['texts'].tolist()
self.metadata = data['metadata'].tolist()
self.vectors = data['vectors']
except FileNotFoundError:
self.texts = []
self.metadata = {}
self.vectors = np.empty((0, self.model.dimension))
def add_vector(self, vector):
self.vectors = np.vstack((self.vectors, vector))
def get_similar_vectors(self, vector, top_k=5):
sims = cos_sim(vector, self.vectors)
sims = sims[0]
# print("[get_similar_vectors] Sims:", sims.shape)
top_k_idx = np.argsort(sims)[::-1][:top_k]
# print("[get_similar_vectors] Top k idx:", top_k_idx)
# print("[get_similar_vectors] Top k sims:", sims[top_k_idx])
return top_k_idx, sims[top_k_idx]
def memorize(self, text, id=None, metadata=None):
id = id or str(uuid4())
|
class VLite:
'''
vlite is a simple vector database that stores vectors in a numpy array.
'''
def __init__(self, collection=None,device='mps',model_name=None):
# Filename must be unique between runs. Saving to the same file will append vectors to previous run's vectors
if collection is None:
current_datetime = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
collection = f"vlite_{current_datetime}.npz"
self.collection = collection
self.device = device
self.model = EmbeddingModel() if model_name is None else EmbeddingModel(model_name)
try:
with np.load(self.collection, allow_pickle=True) as data:
self.texts = data['texts'].tolist()
self.metadata = data['metadata'].tolist()
self.vectors = data['vectors']
except FileNotFoundError:
self.texts = []
self.metadata = {}
self.vectors = np.empty((0, self.model.dimension))
def add_vector(self, vector):
self.vectors = np.vstack((self.vectors, vector))
def get_similar_vectors(self, vector, top_k=5):
sims = cos_sim(vector, self.vectors)
sims = sims[0]
# print("[get_similar_vectors] Sims:", sims.shape)
top_k_idx = np.argsort(sims)[::-1][:top_k]
# print("[get_similar_vectors] Top k idx:", top_k_idx)
# print("[get_similar_vectors] Top k sims:", sims[top_k_idx])
return top_k_idx, sims[top_k_idx]
def memorize(self, text, id=None, metadata=None):
id = id or str(uuid4()) | chunks = chop_and_chunk(text) | 1 | 2023-12-25 07:16:09+00:00 | 2k |
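memorize above is cut off at the chunking step, but the surrounding methods show the intended flow: chunk and embed the text, stack the vectors, then rank by cosine similarity. A minimal usage sketch; the collection name is arbitrary, and memorize finishing by storing the embedded chunks is an assumption:

db = VLite(collection="demo.npz", device="cpu")
db.memorize("vlite keeps every embedding in one numpy array.")
query = db.model.embed(["where are the vectors stored?"], device="cpu")
top_idx, sims = db.get_similar_vectors(query, top_k=3)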
avataar/bg_electricity_regulated_pricing | custom_components/bg_electricity_regulated_pricing/sensor.py | [
{
"identifier": "CONF_TARIFF_TYPE",
"path": "custom_components/bg_electricity_regulated_pricing/const.py",
"snippet": "CONF_TARIFF_TYPE = \"tariff_type\""
},
{
"identifier": "CONF_PROVIDER",
"path": "custom_components/bg_electricity_regulated_pricing/const.py",
"snippet": "CONF_PROVIDER = \"provider\""
},
{
"identifier": "CONF_CUSTOM_DAY_PRICE",
"path": "custom_components/bg_electricity_regulated_pricing/const.py",
"snippet": "CONF_CUSTOM_DAY_PRICE = \"custom_day_price\""
},
{
"identifier": "CONF_CUSTOM_NIGHT_PRICE",
"path": "custom_components/bg_electricity_regulated_pricing/const.py",
"snippet": "CONF_CUSTOM_NIGHT_PRICE = \"custom_night_price\""
},
{
"identifier": "PROVIDER_PRICES",
"path": "custom_components/bg_electricity_regulated_pricing/const.py",
"snippet": "PROVIDER_PRICES = {\n # Section 6.1, https://www.dker.bg/uploads/reshenia/2023/res_c_14_23.pdf\n \"electrohold\": {\n \"day\": .14875,\n \"night\": .05997,\n \"fees\": .01623 + .00754 + .04232\n },\n # Section 6.1, https://www.dker.bg/uploads/reshenia/2023/res_c_14_23.pdf\n \"evn\": {\n \"day\": .14667,\n \"night\": .05531,\n \"fees\": .01623 + .00803 + .04366\n },\n # Section 6.3, https://www.dker.bg/uploads/reshenia/2023/res_c_14_23.pdf\n \"energo_pro\": {\n \"day\": .15076,\n \"night\": .05279,\n \"fees\": .01623 + .00959 + .04825\n }\n}"
},
{
"identifier": "CONF_CLOCK_OFFSET",
"path": "custom_components/bg_electricity_regulated_pricing/const.py",
"snippet": "CONF_CLOCK_OFFSET = \"clock_offset\""
},
{
"identifier": "BGN_PER_KILOWATT_HOUR",
"path": "custom_components/bg_electricity_regulated_pricing/const.py",
"snippet": "BGN_PER_KILOWATT_HOUR = f\"BGN/{UnitOfEnergy.KILO_WATT_HOUR}\""
},
{
"identifier": "VAT_RATE",
"path": "custom_components/bg_electricity_regulated_pricing/const.py",
"snippet": "VAT_RATE = 0.2"
},
{
"identifier": "DOMAIN",
"path": "custom_components/bg_electricity_regulated_pricing/const.py",
"snippet": "DOMAIN = \"bg_electricity_regulated_pricing\""
}
] | from homeassistant.components.sensor import SensorEntity, SensorEntityDescription, \
SensorStateClass
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util import utcnow
from homeassistant.helpers.device_registry import DeviceEntryType, DeviceInfo
from .const import CONF_TARIFF_TYPE, CONF_PROVIDER, CONF_CUSTOM_DAY_PRICE, \
CONF_CUSTOM_NIGHT_PRICE, PROVIDER_PRICES, CONF_CLOCK_OFFSET, \
BGN_PER_KILOWATT_HOUR, VAT_RATE, DOMAIN | 753 | """Sensor platform for bg_electricity_regulated_pricing integration."""
from __future__ import annotations
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Initialize bg_electricity_regulated_pricing config entry."""
name = config_entry.title
unique_id = config_entry.entry_id
tariff_type = config_entry.options[CONF_TARIFF_TYPE]
clock_offset = config_entry.options[CONF_CLOCK_OFFSET]
provider = config_entry.options[CONF_PROVIDER]
if provider == "custom":
| """Sensor platform for bg_electricity_regulated_pricing integration."""
from __future__ import annotations
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Initialize bg_electricity_regulated_pricing config entry."""
name = config_entry.title
unique_id = config_entry.entry_id
tariff_type = config_entry.options[CONF_TARIFF_TYPE]
clock_offset = config_entry.options[CONF_CLOCK_OFFSET]
provider = config_entry.options[CONF_PROVIDER]
if provider == "custom": | price_day = config_entry.options[CONF_CUSTOM_DAY_PRICE] | 2 | 2023-12-24 11:13:54+00:00 | 2k |
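PROVIDER_PRICES in this record's context carries per-kWh energy prices plus grid fees before VAT, and VAT_RATE is 0.2. A minimal sketch of a final day-tariff price; folding the fees into the day price and then applying VAT is an assumption about how the sensor combines them:

prices = PROVIDER_PRICES["evn"]
final_day_price = (prices["day"] + prices["fees"]) * (1 + VAT_RATE)  # assumed formula
print(round(final_day_price, 5), BGN_PER_KILOWATT_HOUR)              # BGN/kWh incl. VAT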
Qazalbash/jaxtro | jaxtro/main.py | [
{
"identifier": "parser",
"path": "jaxtro/utils/parser.py",
"snippet": "def parse_config(config_path: str) -> dict:"
},
{
"identifier": "PopulationGenerator",
"path": "jaxtro/utils/popgen.py",
"snippet": "class PopulationGenerator:\n \"\"\"Class to generate population and save them to disk.\"\"\"\n\n def __init__(self, general: dict, models: dict) -> None:\n \"\"\"__init__ method for PopulationGenerator.\n\n Parameters\n ----------\n config : dict\n Configuration dictionary for PopulationGenerator.\n \"\"\"\n self.check_general(general)\n for model in models:\n self.check_models(model)\n\n self._size: int = general[\"size\"]\n self._error_scale: float = general[\"error_scale\"]\n self._error_size: int = general[\"error_size\"]\n self._root_container: str = general[\"root_container\"]\n self._event_filename: str = general[\"event_filename\"]\n self._config_filename: str = general[\"config_filename\"]\n self._models: list[ContinuousRV] = models\n\n @staticmethod\n def check_general(general: dict) -> None:\n \"\"\"Check if all the required configs are present.\"\"\"\n assert general.get(\"size\", None) is not None\n assert general.get(\"error_scale\", None) is not None\n assert general.get(\"error_size\", None) is not None\n assert general.get(\"root_container\", None) is not None\n assert general.get(\"event_filename\", None) is not None\n assert general.get(\"config_filename\", None) is not None\n\n @staticmethod\n def check_models(model: dict) -> None:\n \"\"\"Check if all the required configs are present.\"\"\"\n assert model.get(\"model\", None) is not None\n assert model.get(\"config_vars\", None) is not None\n assert model.get(\"col_names\", None) is not None\n assert model.get(\"params\", None) is not None\n\n def generate(self):\n \"\"\"Generate population and save them to disk.\"\"\"\n os.makedirs(self._root_container, exist_ok=True)\n\n container = f\"{self._root_container}\"\n\n os.makedirs(container, exist_ok=True)\n\n config_vals = []\n col_names = []\n realisations = np.empty((self._size, 0))\n\n for model in self._models:\n\n model_instance: ContinuousRV = eval(model[\"model\"])(**model[\"params\"])\n rvs = model_instance.rvs(self._size)\n realisations = jnp.concatenate((realisations, rvs), axis=1)\n\n config_vals.extend([(x, model[\"params\"][x]) for x in model[\"config_vars\"]])\n col_names.extend(model[\"col_names\"])\n\n dump_configurations(\n f\"{container}/{self._config_filename}\",\n *config_vals,\n )\n\n for event_num, realisation in tqdm(enumerate(realisations),\n desc=f\"Generating events\",\n total=self._size,\n unit=\" events\",\n unit_scale=True):\n\n filename = f\"{container}/{self._event_filename.format(event_num)}\"\n\n realisation_err = add_normal_error(\n *realisation,\n scale=self._error_scale,\n size=self._error_size,\n )\n\n np.savetxt(\n filename,\n realisation_err,\n header=\"\\t\".join(col_names),\n )"
}
] | from .utils import PopulationGenerator, parser | 981 | # Copyright 2023 The Jaxtro Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def main():
args = parser.cmd_parser.parse_args()
configuration_dict = parser.parse_config(args.my_config)
general = configuration_dict['general']
models = [configuration_dict.get('mass_model', None), configuration_dict.get('spin_model', None)]
| # Copyright 2023 The Jaxtro Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def main():
args = parser.cmd_parser.parse_args()
configuration_dict = parser.parse_config(args.my_config)
general = configuration_dict['general']
models = [configuration_dict.get('mass_model', None), configuration_dict.get('spin_model', None)]
| pg = PopulationGenerator(general=general, models=models) | 1 | 2023-12-24 21:55:35+00:00 | 2k |
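check_general in this record's context asserts exactly which keys the 'general' block must provide, which pins down the expected configuration shape. A minimal sketch of a matching dictionary; all values are placeholders:

general = {
    "size": 100,                       # number of events to draw
    "error_scale": 0.05,               # scale of the added normal error
    "error_size": 10,                  # error realisations per event
    "root_container": "population",
    "event_filename": "event_{}.dat",  # formatted with the event number
    "config_filename": "configuration.dat",
}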
smonsays/modular-hyperteacher | metax/learner/reptile.py | [
{
"identifier": "Dataset",
"path": "metax/data/base.py",
"snippet": "class Dataset(NamedTuple):\n x: Array\n y: Array\n info: Dict = dict()"
},
{
"identifier": "batch_generator",
"path": "metax/data/utils.py",
"snippet": "def batch_generator(rng, datastruct, steps, batch_size):\n \"\"\"\n Add leading dims to datastruct resulting in (steps, batch_size, *data.shape).\n If batch_size is None, repeat each data leaf, otherwise sample random batches.\n \"\"\"\n if batch_size is None or batch_size < 1:\n # Repeat whole data on new leading dim for number of steps\n def repeat(x):\n return jnp.repeat(jnp.expand_dims(x, axis=0), steps, axis=0)\n\n return jtu.tree_map(repeat, datastruct)\n\n else:\n rng_batch = jax.random.split(rng, steps)\n batch_get_batch = jax.vmap(get_batch, in_axes=(0, None, None))\n\n return batch_get_batch(rng_batch, datastruct, batch_size)"
},
{
"identifier": "LearnedInit",
"path": "metax/module/init.py",
"snippet": "class LearnedInit(MetaModule):\n def __init__(self, loss_fn_inner, loss_fn_outer, base_learner, reg_strength):\n super().__init__(loss_fn_inner, loss_fn_outer)\n self.base_learner = base_learner\n\n if reg_strength is not None:\n # Use iMAML regularizer towards meta-learned init\n key_map = {\"base_learner\": \"base_learner_init\"}\n\n self.loss_fn_inner += energy.iMAML(\n reg_strength=reg_strength,\n key_map=key_map,\n reduction=\"sum\"\n )\n\n def __call__(self, rng, state, hstate, params, hparams, input, is_training):\n output, state = self.base_learner.apply(\n params.base_learner, state.base_learner, rng, input, is_training\n )\n return output, (LearnedInitState(state), hstate)\n\n def reset_hparams(self, rng, sample_input):\n params_base_learner, _ = self.base_learner.init(rng, sample_input, is_training=True)\n\n # Re-using params container here to simplify implementation of reptile\n return LearnedInitMetaParams(params_base_learner), LearnedInitMetaState()\n\n def reset_params(self, rng, hparams, hstate, sample_input):\n _, state_base_learner = self.base_learner.init(rng, sample_input, is_training=True)\n\n return LearnedInitParams(hparams.base_learner_init), LearnedInitState(state_base_learner)"
},
{
"identifier": "LearnedInitMetaParams",
"path": "metax/module/init.py",
"snippet": "class LearnedInitMetaParams(NamedTuple):\n base_learner_init: Dict"
},
{
"identifier": "append_keys",
"path": "metax/utils/utils.py",
"snippet": "def append_keys(dictionary, suffix):\n return {key + \"_\" + suffix: value for key, value in dictionary.items()}"
},
{
"identifier": "MetaGradLearner",
"path": "metax/learner/base.py",
"snippet": "class MetaGradLearner(MetaLearnerInnerGradientDescent):\n \"\"\"\n Abstract base class for meta-learning algorithms that estimate the meta-gradient.\n \"\"\"\n\n def __init__(\n self,\n meta_model: MetaModule,\n batch_size: int,\n steps_inner: int,\n optim_fn_inner: optax.GradientTransformation,\n optim_fn_outer: optax.GradientTransformation,\n ):\n super().__init__(meta_model, batch_size, steps_inner, optim_fn_inner)\n self.optim_fn_outer = optim_fn_outer\n\n self.batch_grad = jax.vmap(self.grad, in_axes=(0, None, None, 0))\n\n @abc.abstractmethod\n def grad(\n self, rng: chex.PRNGKey, hstate: HState, hparams: HParams, metadataset: data.MetaDataset\n ) -> Tuple[chex.Array, HState, Dict]:\n pass\n\n def update(self, rng, meta_state, metadataset: data.MetaDataset):\n rng_batch = jax.random.split(rng, len(metadataset.train.x))\n hgrads, hstate, metrics = self.batch_grad(\n rng_batch, meta_state.hstate, meta_state.hparams, metadataset\n )\n\n hgrads = jtu.tree_map(partial(jnp.mean, axis=0), hgrads) # Average hgrads across tasks\n hparams_update, optim_state = self.optim_fn_outer.update(\n hgrads, meta_state.optim, meta_state.hparams\n )\n hparams = optax.apply_updates(meta_state.hparams, hparams_update)\n\n # HACK: Averaging over the model state might result in unexpected behaviour\n # HACK: Averaging might change dtype (e.g. int to float), this simply casts it back\n hstate_dtypes = jtu.tree_map(jnp.dtype, hstate)\n hstate = jtu.tree_map(partial(jnp.mean, axis=0), hstate)\n hstate = jtu.tree_map(jax.lax.convert_element_type, hstate, hstate_dtypes)\n metrics = jtu.tree_map(partial(jnp.mean, axis=0), metrics)\n\n return MetaLearnerState(hparams=hparams, optim=optim_state, hstate=hstate), metrics"
}
] | import jax
import jax.numpy as jnp
import jax.tree_util as jtu
import optax
from metax.data import Dataset, batch_generator
from metax.module import LearnedInit
from metax.module.init import LearnedInitMetaParams
from metax.utils import append_keys
from .base import MetaGradLearner | 1,497 | """
Copyright (c) Simon Schug
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
| """
Copyright (c) Simon Schug
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
| class Reptile(MetaGradLearner): | 5 | 2023-12-22 16:35:49+00:00 | 2k |
AContesini/Convert_PDF_to_DOCX_or_vice-versa | venv/Lib/site-packages/tqdm/contrib/concurrent.py | [
{
"identifier": "tqdm",
"path": "venv/Lib/site-packages/tqdm/auto.py",
"snippet": "class tqdm(notebook_tqdm, asyncio_tqdm): # pylint: disable=inconsistent-mro\n pass"
},
{
"identifier": "TqdmWarning",
"path": "venv/Lib/site-packages/tqdm/std.py",
"snippet": "class TqdmWarning(Warning):\n \"\"\"base class for all tqdm warnings.\n\n Used for non-external-code-breaking errors, such as garbled printing.\n \"\"\"\n def __init__(self, msg, fp_write=None, *a, **k):\n if fp_write is not None:\n fp_write(\"\\n\" + self.__class__.__name__ + \": \" + str(msg).rstrip() + '\\n')\n else:\n super(TqdmWarning, self).__init__(msg, *a, **k)"
}
] | from contextlib import contextmanager
from operator import length_hint
from os import cpu_count
from ..auto import tqdm as tqdm_auto
from ..std import TqdmWarning
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import ProcessPoolExecutor
from warnings import warn | 1,153 | """
Thin wrappers around `concurrent.futures`.
"""
__author__ = {"github.com/": ["casperdcl"]}
__all__ = ['thread_map', 'process_map']
@contextmanager
def ensure_lock(tqdm_class, lock_name=""):
"""get (create if necessary) and then restore `tqdm_class`'s lock"""
old_lock = getattr(tqdm_class, '_lock', None) # don't create a new lock
lock = old_lock or tqdm_class.get_lock() # maybe create a new lock
lock = getattr(lock, lock_name, lock) # maybe subtype
tqdm_class.set_lock(lock)
yield lock
if old_lock is None:
del tqdm_class._lock
else:
tqdm_class.set_lock(old_lock)
def _executor_map(PoolExecutor, fn, *iterables, **tqdm_kwargs):
"""
Implementation of `thread_map` and `process_map`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm].
max_workers : [default: min(32, cpu_count() + 4)].
chunksize : [default: 1].
lock_name : [default: "":str].
"""
kwargs = tqdm_kwargs.copy()
if "total" not in kwargs:
kwargs["total"] = length_hint(iterables[0])
tqdm_class = kwargs.pop("tqdm_class", tqdm_auto)
max_workers = kwargs.pop("max_workers", min(32, cpu_count() + 4))
chunksize = kwargs.pop("chunksize", 1)
lock_name = kwargs.pop("lock_name", "")
with ensure_lock(tqdm_class, lock_name=lock_name) as lk:
# share lock in case workers are already using `tqdm`
with PoolExecutor(max_workers=max_workers, initializer=tqdm_class.set_lock,
initargs=(lk,)) as ex:
return list(tqdm_class(ex.map(fn, *iterables, chunksize=chunksize), **kwargs))
def thread_map(fn, *iterables, **tqdm_kwargs):
"""
Equivalent of `list(map(fn, *iterables))`
driven by `concurrent.futures.ThreadPoolExecutor`.
Parameters
----------
tqdm_class : optional
`tqdm` class to use for bars [default: tqdm.auto.tqdm].
max_workers : int, optional
Maximum number of workers to spawn; passed to
`concurrent.futures.ThreadPoolExecutor.__init__`.
        [default: min(32, cpu_count() + 4)].
"""
return _executor_map(ThreadPoolExecutor, fn, *iterables, **tqdm_kwargs)
def process_map(fn, *iterables, **tqdm_kwargs):
"""
Equivalent of `list(map(fn, *iterables))`
driven by `concurrent.futures.ProcessPoolExecutor`.
Parameters
----------
tqdm_class : optional
`tqdm` class to use for bars [default: tqdm.auto.tqdm].
max_workers : int, optional
Maximum number of workers to spawn; passed to
`concurrent.futures.ProcessPoolExecutor.__init__`.
[default: min(32, cpu_count() + 4)].
chunksize : int, optional
Size of chunks sent to worker processes; passed to
`concurrent.futures.ProcessPoolExecutor.map`. [default: 1].
lock_name : str, optional
Member of `tqdm_class.get_lock()` to use [default: mp_lock].
"""
if iterables and "chunksize" not in tqdm_kwargs:
# default `chunksize=1` has poor performance for large iterables
# (most time spent dispatching items to workers).
longest_iterable_len = max(map(length_hint, iterables))
if longest_iterable_len > 1000:
warn("Iterable length %d > 1000 but `chunksize` is not set."
" This may seriously degrade multiprocess performance."
" Set `chunksize=1` or more." % longest_iterable_len,
| """
Thin wrappers around `concurrent.futures`.
"""
__author__ = {"github.com/": ["casperdcl"]}
__all__ = ['thread_map', 'process_map']
@contextmanager
def ensure_lock(tqdm_class, lock_name=""):
"""get (create if necessary) and then restore `tqdm_class`'s lock"""
old_lock = getattr(tqdm_class, '_lock', None) # don't create a new lock
lock = old_lock or tqdm_class.get_lock() # maybe create a new lock
lock = getattr(lock, lock_name, lock) # maybe subtype
tqdm_class.set_lock(lock)
yield lock
if old_lock is None:
del tqdm_class._lock
else:
tqdm_class.set_lock(old_lock)
def _executor_map(PoolExecutor, fn, *iterables, **tqdm_kwargs):
"""
Implementation of `thread_map` and `process_map`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm].
max_workers : [default: min(32, cpu_count() + 4)].
chunksize : [default: 1].
lock_name : [default: "":str].
"""
kwargs = tqdm_kwargs.copy()
if "total" not in kwargs:
kwargs["total"] = length_hint(iterables[0])
tqdm_class = kwargs.pop("tqdm_class", tqdm_auto)
max_workers = kwargs.pop("max_workers", min(32, cpu_count() + 4))
chunksize = kwargs.pop("chunksize", 1)
lock_name = kwargs.pop("lock_name", "")
with ensure_lock(tqdm_class, lock_name=lock_name) as lk:
# share lock in case workers are already using `tqdm`
with PoolExecutor(max_workers=max_workers, initializer=tqdm_class.set_lock,
initargs=(lk,)) as ex:
return list(tqdm_class(ex.map(fn, *iterables, chunksize=chunksize), **kwargs))
def thread_map(fn, *iterables, **tqdm_kwargs):
"""
Equivalent of `list(map(fn, *iterables))`
driven by `concurrent.futures.ThreadPoolExecutor`.
Parameters
----------
tqdm_class : optional
`tqdm` class to use for bars [default: tqdm.auto.tqdm].
max_workers : int, optional
Maximum number of workers to spawn; passed to
`concurrent.futures.ThreadPoolExecutor.__init__`.
        [default: min(32, cpu_count() + 4)].
"""
return _executor_map(ThreadPoolExecutor, fn, *iterables, **tqdm_kwargs)
def process_map(fn, *iterables, **tqdm_kwargs):
"""
Equivalent of `list(map(fn, *iterables))`
driven by `concurrent.futures.ProcessPoolExecutor`.
Parameters
----------
tqdm_class : optional
`tqdm` class to use for bars [default: tqdm.auto.tqdm].
max_workers : int, optional
Maximum number of workers to spawn; passed to
`concurrent.futures.ProcessPoolExecutor.__init__`.
[default: min(32, cpu_count() + 4)].
chunksize : int, optional
Size of chunks sent to worker processes; passed to
`concurrent.futures.ProcessPoolExecutor.map`. [default: 1].
lock_name : str, optional
Member of `tqdm_class.get_lock()` to use [default: mp_lock].
"""
if iterables and "chunksize" not in tqdm_kwargs:
# default `chunksize=1` has poor performance for large iterables
# (most time spent dispatching items to workers).
longest_iterable_len = max(map(length_hint, iterables))
if longest_iterable_len > 1000:
warn("Iterable length %d > 1000 but `chunksize` is not set."
" This may seriously degrade multiprocess performance."
" Set `chunksize=1` or more." % longest_iterable_len, | TqdmWarning, stacklevel=2) | 1 | 2023-12-24 15:46:18+00:00 | 2k |
willfinnigan/RetroBioCat_2 | rbc2/expansion/expanders/action_getters/aizynthfinder/aizynthfinder_actions.py | [
{
"identifier": "does_aizynthfinder_exist",
"path": "rbc2/configs/download_data_files/download_aizynthfinder.py",
"snippet": "def does_aizynthfinder_exist() -> bool:\n if not os.path.exists(f\"{path_to_data_folder}/aizynthfinder/uspto_model.hdf5\"):\n return False\n if not os.path.exists(f\"{path_to_data_folder}/aizynthfinder/uspto_templates.hdf5\"):\n return False\n return True"
},
{
"identifier": "download_aizynthfinder_model",
"path": "rbc2/configs/download_data_files/download_aizynthfinder.py",
"snippet": "def download_aizynthfinder_model():\n aizynthfinder_model = \"https://figshare.com/ndownloader/files/23086454\"\n aizynthfinder_templates = \"https://figshare.com/ndownloader/files/23086457\"\n\n # if aizynthfinder folder doesn't exist, create it with Pathlib\n directory = f\"{path_to_data_folder}/aizynthfinder\"\n\n Path(directory).mkdir(parents=True, exist_ok=True)\n\n filename = \"uspto_model.hdf5\"\n filepath = f\"{directory}/{filename}\"\n download_file(aizynthfinder_model, filepath)\n\n filename = \"uspto_templates.hdf5\"\n filepath = f\"{directory}/{filename}\"\n download_file(aizynthfinder_templates, filepath)"
},
{
"identifier": "add_logger",
"path": "rbc2/utils/add_logger.py",
"snippet": "def add_logger(name, level='DEBUG'):\n logger = logging.getLogger(name)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(level)\n logger.propagate = False\n return logger"
},
{
"identifier": "path_to_data_folder",
"path": "rbc2/configs/data_path.py",
"snippet": "DEFAULT_DATA_FOLDER = str(Path(__file__).parents[1]) + '/data'\nRBC2_DATA_PATH = os.getenv('RBC2_DATA_PATH')"
},
{
"identifier": "Expansion_Config",
"path": "rbc2/configs/expansion_config.py",
"snippet": "class Expansion_Config():\n\n def __init__(self):\n\n # rule application\n self.allow_chiral_symmetry = False\n self.check_chiral_products = True\n self.combine_enantiomers = True\n self.allow_cyclic_reaction_outcomes = False\n self.clean_brackets = True\n\n # reaction parsing\n self.allow_backwards = False\n self.allow_duplicates = False\n self.duplicates_require_same_expander = True\n self.duplicates_require_same_domain = False\n self.duplicates_require_same_name = False\n self.merge_duplicate_metadata = True\n self.force_rdkit_smis = True\n\n # expanders general\n self.max_reactions = None # max reactions (not options)\n\n # reaction filtering and blocking\n self.use_max_mw_for_enzymes = False\n self.max_mw_to_use_enzymes = 300\n\n\n\n def update_from_dict(self, attr_dict):\n current_dict = self.to_dict()\n for key, value in attr_dict.items():\n if key in current_dict:\n setattr(self, key, value)\n return self\n\n def to_dict(self):\n return self.__dict__"
},
{
"identifier": "load_keras_models",
"path": "rbc2/utils/load_keras_models.py",
"snippet": "def tensorflow_imports():\n def __init__(self, filename):\n def __len__(self):\n def predict(self, *args: np.ndarray, **_: np.ndarray):\n CUSTOM_OBJECTS = {\"top10_acc\": top10_acc, \"top50_acc\": top50_acc}\nclass LocalKerasModel:"
},
{
"identifier": "fingerprints",
"path": "rbc2/utils/fingerprints.py",
"snippet": "def get_mol_fingerprint(rd_mol, radius=2, nBits=2048):\ndef get_reaction_fingerprint(product_mol, substrate_mols, radius=2, nBits=2048):"
}
] | import time
import numpy as np
import pandas as pd
from rdkit import Chem
from rbc2.configs.download_data_files.download_aizynthfinder import does_aizynthfinder_exist, \
download_aizynthfinder_model
from rbc2.utils.add_logger import add_logger
from rbc2.configs.data_path import path_to_data_folder
from rbc2.configs.expansion_config import Expansion_Config
from rbc2.utils import load_keras_models, fingerprints | 1,573 |
data_folder = f'{path_to_data_folder}/aizynthfinder'
class AizynthfinderActionGetter():
def __init__(self,
template_column='retro_template',
cutoff_cumulative=0.995,
cutoff_number=50,
log_level='WARNING'):
self.logger = add_logger('AIZynthfinder_Actions', level=log_level)
self.policy_model = None
self.templates = None
self.template_column = template_column
self.cutoff_cumulative = cutoff_cumulative
self.cutoff_number = cutoff_number
        if not does_aizynthfinder_exist():
download_aizynthfinder_model()
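        # First use triggers a one-time download of the USPTO policy model and
        # template library into <data>/aizynthfinder (per the download helpers
        # imported above); later runs find the .hdf5 files and skip this step.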
def load_model(self):
        if self.policy_model is None:
policy_path = data_folder + '/uspto_model.hdf5'
self.policy_model = load_keras_models.LocalKerasModel(policy_path)
        if self.templates is None:
templates_path = data_folder + '/uspto_templates.hdf5'
self.templates = pd.read_hdf(templates_path, "table")
def get_actions(self, smi):
reactions = []
priors = []
template_column = self.template_column
mol = Chem.MolFromSmiles(smi)
all_transforms_prop = self._predict(mol)
probable_transforms_idx = self._cutoff_predictions(all_transforms_prop)
possible_moves = self.templates.iloc[probable_transforms_idx]
probs = all_transforms_prop[probable_transforms_idx]
priors.extend(probs)
for idx, (move_index, move) in enumerate(possible_moves.iterrows()):
metadata = dict(move)
del metadata[template_column]
metadata["policy_probability"] = round(float(probs[idx]), 5)
metadata["template_code"] = move_index
reaction = {'smarts': move[template_column],
'metadata': metadata,
'prior': priors[idx]}
reactions.append(reaction)
return reactions
def get_rxns(self, smile):
        if self.policy_model is None:
self.load_model()
reactions = self.get_actions(smile)
rxns = {}
metadata = {}
for reaction in reactions:
name = f"Chem_{reaction['metadata']['classification']}"
num = 1
extra_string = f"__{num}"
while name+extra_string in rxns:
extra_string = f"__{num}"
num += 1
name = name+extra_string
smarts = reaction['smarts']
if self._does_smarts_only_one_reactants(smarts):
rxns[name] = [smarts]
else:
rxns[name] = []
metadata[name] = reaction['metadata']
return rxns, metadata
def _predict(self, mol):
|
data_folder = f'{path_to_data_folder}/aizynthfinder'
class AizynthfinderActionGetter():
def __init__(self,
template_column='retro_template',
cutoff_cumulative=0.995,
cutoff_number=50,
log_level='WARNING'):
self.logger = add_logger('AIZynthfinder_Actions', level=log_level)
self.policy_model = None
self.templates = None
self.template_column = template_column
self.cutoff_cumulative = cutoff_cumulative
self.cutoff_number = cutoff_number
        if not does_aizynthfinder_exist():
download_aizynthfinder_model()
def load_model(self):
        if self.policy_model is None:
policy_path = data_folder + '/uspto_model.hdf5'
self.policy_model = load_keras_models.LocalKerasModel(policy_path)
        if self.templates is None:
templates_path = data_folder + '/uspto_templates.hdf5'
self.templates = pd.read_hdf(templates_path, "table")
def get_actions(self, smi):
reactions = []
priors = []
template_column = self.template_column
mol = Chem.MolFromSmiles(smi)
all_transforms_prop = self._predict(mol)
probable_transforms_idx = self._cutoff_predictions(all_transforms_prop)
possible_moves = self.templates.iloc[probable_transforms_idx]
probs = all_transforms_prop[probable_transforms_idx]
priors.extend(probs)
for idx, (move_index, move) in enumerate(possible_moves.iterrows()):
metadata = dict(move)
del metadata[template_column]
metadata["policy_probability"] = round(float(probs[idx]), 5)
metadata["template_code"] = move_index
reaction = {'smarts': move[template_column],
'metadata': metadata,
'prior': priors[idx]}
reactions.append(reaction)
return reactions
def get_rxns(self, smile):
        if self.policy_model is None:
self.load_model()
reactions = self.get_actions(smile)
rxns = {}
metadata = {}
for reaction in reactions:
name = f"Chem_{reaction['metadata']['classification']}"
num = 1
extra_string = f"__{num}"
while name+extra_string in rxns:
extra_string = f"__{num}"
num += 1
name = name+extra_string
smarts = reaction['smarts']
if self._does_smarts_only_one_reactants(smarts):
rxns[name] = [smarts]
else:
rxns[name] = []
metadata[name] = reaction['metadata']
return rxns, metadata
def _predict(self, mol): | fingerprint = fingerprints.get_mol_fingerprint(mol, 2, nBits=len(self.policy_model)) | 6 | 2023-12-30 11:33:41+00:00 | 2k |
DomingoJoseCab/AutoTube | utils/edition/edit.py | [
{
"identifier": "load_videos",
"path": "utils/edition/autoediting.py",
"snippet": "def load_videos(videos_path):\r\n video_list = []\r\n videos = os.listdir(videos_path)\r\n for vid in videos:\r\n video = VideoFileClip(os.path.join(videos_path,vid))\r\n video_list.append(video)\r\n\r\n return video_list\r"
},
{
"identifier": "load_audio",
"path": "utils/edition/autoediting.py",
"snippet": "def load_audio(audio_path):\r\n audio_list = []\r\n audios = os.listdir(audio_path)\r\n for au in audios:\r\n audio = AudioFileClip(os.path.join(audio_path,au))\r\n audio_list.append(audio)\r\n\r\n return audio_list\r"
},
{
"identifier": "generate_product",
"path": "utils/edition/autoediting.py",
"snippet": "def generate_product(video, audio):\r\n ordered_clips = generate_subclip(video)\r\n\r\n repetitions = ceil(audio.duration / sum(clip.duration for clip in ordered_clips))\r\n\r\n final_clips_sequence = ordered_clips * repetitions\r\n\r\n final_clips_sequence = concatenate_videoclips(final_clips_sequence).subclip(0, audio.duration+1)\r\n\r\n final_video = final_clips_sequence.set_audio(CompositeAudioClip([audio.set_start(0.5)]))\r\n\r\n return final_video\r"
},
{
"identifier": "generate_intro",
"path": "utils/edition/autoediting.py",
"snippet": "def generate_intro(videos, audio):\r\n selected_video = choice(videos)\r\n audio_duration = audio.duration\r\n\r\n total_video_duration = audio_duration + 1 \r\n\r\n start_time = choice(range(int(selected_video.duration - total_video_duration)))\r\n video_clip = selected_video.subclip(start_time, start_time + total_video_duration)\r\n\r\n adjusted_audio = CompositeAudioClip([audio.set_start(0.5)])\r\n\r\n video_clip = video_clip.set_audio(adjusted_audio)\r\n\r\n return video_clip\r"
},
{
"identifier": "generate_outro",
"path": "utils/edition/autoediting.py",
"snippet": "def generate_outro(videos, audio):\r\n selected_video = choice(videos)\r\n audio_duration = audio.duration\r\n\r\n clips = generate_subclip(selected_video)\r\n\r\n total_video_duration = audio_duration + 25\r\n\r\n repetitions = ceil(total_video_duration / sum(clip.duration for clip in clips))\r\n\r\n final_clips = clips * repetitions\r\n\r\n final_clips = concatenate_videoclips(final_clips).subclip(0, total_video_duration)\r\n\r\n adjusted_audio = CompositeAudioClip([audio.set_start(0.5)])\r\n\r\n video_clip = final_clips.set_audio(adjusted_audio)\r\n\r\n return video_clip\r"
},
{
"identifier": "title_intro",
"path": "utils/edition/autotext.py",
"snippet": "def title_intro(title:str, video):\r\n \r\n texto = TextClip(title, fontsize=40, color='white', font='Bebas Neue Bold')\r\n texto = texto.set_position('center').set_duration(6)\r\n\r\n color_clip = ColorClip(video.size, color=(0, 0, 0), duration=texto.duration)\r\n color_clip = color_clip.set_opacity(0.9) # Ajusta la opacidad\r\n\r\n color_clip = color_clip.set_start(4)\r\n\r\n texto = texto.set_start(4).crossfadein(1)\r\n\r\n video_opaco = CompositeVideoClip([video, color_clip])\r\n\r\n video_final = CompositeVideoClip([video_opaco, texto])\r\n\r\n return video_final\r"
}
] | import os
import json
from moviepy.editor import CompositeVideoClip
from utils.edition.autoediting import load_videos, load_audio, generate_product, generate_intro, generate_outro
from utils.edition.autotext import title_intro
from moviepy.config import change_settings
| 943 | # ==============================================================================
# AutoTube Script
# Created by: Domingo Caballero
# YouTube channel: https://www.youtube.com/@emprendedomingo?=sub_confirmation=1
# Mailing list: https://emprendecondomingo.substack.com/
# ==============================================================================
def main(videos_path, audios_path, output_path, names, base_path):
videos = load_videos(videos_path)
audios = load_audio(audios_path)
audio_intro = audios.pop(0)
audio_outro = audios.pop(-1)
| # ==============================================================================
# AutoTube Script
# Creado por: Domingo Caballero
# Canal de YouTube: https://www.youtube.com/@emprendedomingo?=sub_confirmation=1
# Lista de Correo: https://emprendecondomingo.substack.com/
# ==============================================================================
def main(videos_path, audios_path, output_path, names, base_path):
videos = load_videos(videos_path)
audios = load_audio(audios_path)
audio_intro = audios.pop(0)
audio_outro = audios.pop(-1)
| intro = generate_intro(videos, audio_intro)
| 3 | 2023-12-28 16:15:37+00:00 | 2k |
gregorybchris/typogenetics | tests/test_search.py | [
{
"identifier": "Editor",
"path": "typogenetics/search.py",
"snippet": "class Editor:\n PROB_MUTATE = 0.80\n PROB_INSERT = 0.10\n PROB_DELETE = 0.10\n\n @classmethod\n def edit(cls, strand: Strand, rng: Generator) -> Strand:\n edit_type = cls.select_edit_type(rng)\n if edit_type == EditType.MUTATE:\n return cls.mutate(strand, rng)\n if edit_type == EditType.INSERT:\n return cls.insert(strand, rng)\n if edit_type == EditType.DELETE:\n return cls.delete(strand, rng)\n\n @classmethod\n def mutate(cls, strand: Strand, rng: Generator) -> Strand:\n r1 = rng.integers(0, len(strand))\n new_bases = strand.bases.copy()\n base = new_bases[r1]\n while new_bases[r1] == base:\n all_bases = [Base.A, Base.C, Base.G, Base.T]\n r2 = rng.integers(0, len(all_bases))\n new_bases[r1] = all_bases[r2]\n return Strand(new_bases)\n\n @classmethod\n def insert(cls, strand: Strand, rng: Generator) -> Strand:\n r1 = rng.integers(0, len(strand) + 1)\n new_bases = strand.bases.copy()\n all_bases = [Base.A, Base.C, Base.G, Base.T]\n r2 = rng.integers(0, len(all_bases))\n new_bases.insert(r1, all_bases[r2])\n return Strand(new_bases)\n\n @classmethod\n def delete(cls, strand: Strand, rng: Generator) -> Strand:\n r1 = rng.integers(0, len(strand))\n new_bases = strand.bases.copy()\n new_bases.pop(r1)\n return Strand(new_bases)\n\n @classmethod\n def select_edit_type(cls, rng: Generator) -> EditType:\n r = rng.random()\n edit_types = [\n (EditType.MUTATE, cls.PROB_MUTATE),\n (EditType.INSERT, cls.PROB_INSERT),\n (EditType.DELETE, cls.PROB_DELETE),\n ]\n assert np.isclose(sum(dict(edit_types).values()), 1.0)\n for edit_type, prob in edit_types:\n if r <= prob:\n return edit_type\n r -= prob\n raise ValueError(\"Random number is not in range [0, 1]\")"
},
{
"identifier": "EditType",
"path": "typogenetics/search.py",
"snippet": "class EditType(StrEnum):\n MUTATE = auto()\n INSERT = auto()\n DELETE = auto()"
},
{
"identifier": "Strand",
"path": "typogenetics/typogenetics.py",
"snippet": "class Strand:\n bases: List[Base]\n\n @classmethod\n def from_str(cls, strand_str: str) -> \"Strand\":\n bases = []\n for base_str in strand_str:\n if base_str == \" \":\n continue\n base = Base.from_str(base_str)\n bases.append(base)\n return cls(bases)\n\n def iter_bases(self) -> Iterator[Base]:\n yield from self.bases\n\n def iter_duplets(self) -> Iterator[Duplet]:\n unit = 0\n while True:\n if unit + 1 >= len(self):\n break\n\n yield (self[unit], self[unit + 1])\n\n unit += 2\n\n def __repr__(self) -> str:\n return \"\".join([str(b) for b in self.bases])\n\n def __str__(self) -> str:\n return self.__repr__()\n\n def __getitem__(self, unit: int) -> Base:\n return self.bases[unit]\n\n def __len__(self) -> int:\n return len(self.bases)"
}
] | import numpy as np
from typogenetics.search import Editor, EditType
from typogenetics.typogenetics import Strand | 993 |
class TestSearch:
def test_select_edit_type(self) -> None:
rng = np.random.default_rng(42)
assert Editor.select_edit_type(rng) == EditType.INSERT
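
    # Note, inferred from the Editor snippet above (not asserted by these
    # tests): select_edit_type draws r = rng.random() and maps r <= 0.80 to
    # MUTATE, 0.80 < r <= 0.90 to INSERT, else DELETE, so the seeded
    # generator lands in the INSERT band deterministically.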
def test_mutate(self) -> None:
rng = np.random.default_rng(42)
|
class TestSearch:
def test_select_edit_type(self) -> None:
rng = np.random.default_rng(42)
assert Editor.select_edit_type(rng) == EditType.INSERT
def test_mutate(self) -> None:
rng = np.random.default_rng(42) | strand = Strand.from_str("ACGT") | 2 | 2023-12-28 08:59:06+00:00 | 2k |
chaoren2357/gsplatstudio | gsplatstudio/data/processor/colmapWcam_processor.py | [
{
"identifier": "BaseDataProcessor",
"path": "gsplatstudio/data/processor/base_processor.py",
"snippet": "class BaseDataProcessor(ABC):\n def __init__(self, cfg, logger, source_path) -> None:\n self.cfg = parse_structured(self.config_class, cfg)\n self.logger = logger\n self.source_path_str = source_path\n \n @property\n @abstractmethod\n def config_class(self):\n pass\n\n @property\n def should_skip(self):\n pass\n\n @abstractmethod\n def run(self):\n pass\n\n def run_command_with_realtime_output(self, cmd):\n \"\"\"\n Run the specified command and output the results in real-time.\n\n :param cmd: The command string to run.\n :return: The exit code of the command.\n \"\"\"\n self.logger.info(f\"Running command: {cmd}\")\n # Start the process\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)\n\n # Read output in real-time\n while True:\n output = process.stdout.readline()\n if output == '' and process.poll() is not None:\n break\n if output:\n self.logger.verbose(output.strip()) \n\n # Read any remaining error output\n stderr_output = process.stderr.read()\n if stderr_output:\n self.logger.error(\"Error Output:\") \n self.logger.error(stderr_output.strip())\n\n # Return the exit code\n return process.returncode"
},
{
"identifier": "load_json",
"path": "gsplatstudio/utils/general_utils.py",
"snippet": "def load_json(json_file):\n with open(json_file, 'r') as file:\n return json.load(file)"
},
{
"identifier": "transform_camera_from_carla_matrix_to_colmap_quaternion",
"path": "gsplatstudio/utils/camera_utils.py",
"snippet": "def transform_camera_from_carla_matrix_to_colmap_quaternion(camera_data):\n x_carla,y_carla,z_carla,roll_carla,pitch_carla,yaw_carla = camera_data['x'],camera_data['y'],camera_data['z'],camera_data['roll'],camera_data['pitch'],camera_data['yaw']\n x = y_carla\n y = -z_carla\n z = x_carla\n roll = pitch_carla\n pitch = yaw_carla\n yaw = roll_carla\n C2W_matrix = get_transform_matrix(x, y, z, pitch, roll, yaw)\n W2C_matrix = np.linalg.inv(C2W_matrix)\n W2C_quaternion = rotmat2qvec(W2C_matrix[:3, :3])\n W2C_translation = W2C_matrix[:3, 3]\n return W2C_quaternion, W2C_translation"
},
{
"identifier": "fov_to_focal_length",
"path": "gsplatstudio/utils/camera_utils.py",
"snippet": "def fov_to_focal_length(fov_degrees, width):\n fov_radians = np.radians(fov_degrees)\n focal_length = (width / 2) / np.tan(fov_radians / 2)\n return focal_length"
}
] | import gsplatstudio
import sqlite3
from gsplatstudio.utils.type_utils import *
from gsplatstudio.data.processor.base_processor import BaseDataProcessor
from pathlib import Path
from gsplatstudio.utils.general_utils import load_json
from gsplatstudio.utils.camera_utils import transform_camera_from_carla_matrix_to_colmap_quaternion, fov_to_focal_length | 1,346 |
@dataclass
class ColmapWithCamProcessorConfig:
use_gpu: bool = True
camera: str = "OPENCV"
map_ba_global_function_tolerance: float = 0.000001
@gsplatstudio.register("colmap_with_cam-processor")
class ColmapWithCamProcessor(BaseDataProcessor):
def __init__(self, cfg, logger, source_path) -> None:
super().__init__(cfg, logger, source_path)
@property
def config_class(self):
return ColmapWithCamProcessorConfig
@property
def should_skip(self):
cameras_file = Path(self.source_path_str) / "sparse" / "0" / "cameras.bin"
images_file = Path(self.source_path_str) / "sparse" / "0" / "images.bin"
points3D_file = Path(self.source_path_str) / "sparse" / "0" / "points3D.bin"
return cameras_file.exists() and images_file.exists() and points3D_file.exists()
def run(self):
        self.logger.info("Start running ColmapWithCamProcessor...")
project_folder = Path(self.source_path_str) / "distorted"
project_folder.mkdir(parents=True, exist_ok=True)
database_path = Path(self.source_path_str) / "distorted" / "database.db"
image_distorted_folder = Path(self.source_path_str) / "input"
camera_folder = Path(self.source_path_str) / "camera"
## Feature extraction
feature_extractor_cmd = "colmap feature_extractor" + \
f" --database_path {str(database_path)}" + \
f" --image_path {str(image_distorted_folder)}" + \
f" --ImageReader.single_camera 1" + \
f" --ImageReader.camera_model {self.cfg.camera}" + \
f" --SiftExtraction.use_gpu {int(self.cfg.use_gpu)}"
exit_code = self.run_command_with_realtime_output(feature_extractor_cmd)
if exit_code != 0:
self.logger.error(f"Feature extraction failed with code {exit_code}. Exiting.")
exit(exit_code)
self.logger.info("Finish feature extraction...")
## Create points3D.txt
points3D_txt_path = project_folder / 'points3D.txt'
open(str(points3D_txt_path), 'w').close()
## Create camera.txt
camera_txt_path = project_folder / 'cameras.txt'
open(str(camera_txt_path), 'w').close()
unique_cameras = {}
camera_id = 1
for camera_file in camera_folder.glob('*.json'):
camera_data = load_json(camera_file)
intrinsics = camera_data['intrinsics']
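            # fov -> focal conversion applied next (illustrative, using the
            # fov_to_focal_length helper above): focal = (width / 2) / tan(fov / 2);
            # e.g. fov = 90 degrees at width = 1920 gives focal = 960 px.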
|
@dataclass
class ColmapWithCamProcessorConfig:
use_gpu: bool = True
camera: str = "OPENCV"
map_ba_global_function_tolerance: float = 0.000001
@gsplatstudio.register("colmap_with_cam-processor")
class ColmapWithCamProcessor(BaseDataProcessor):
def __init__(self, cfg, logger, source_path) -> None:
super().__init__(cfg, logger, source_path)
@property
def config_class(self):
return ColmapWithCamProcessorConfig
@property
def should_skip(self):
cameras_file = Path(self.source_path_str) / "sparse" / "0" / "cameras.bin"
images_file = Path(self.source_path_str) / "sparse" / "0" / "images.bin"
points3D_file = Path(self.source_path_str) / "sparse" / "0" / "points3D.bin"
return cameras_file.exists() and images_file.exists() and points3D_file.exists()
def run(self):
        self.logger.info("Start running ColmapWithCamProcessor...")
project_folder = Path(self.source_path_str) / "distorted"
project_folder.mkdir(parents=True, exist_ok=True)
database_path = Path(self.source_path_str) / "distorted" / "database.db"
image_distorted_folder = Path(self.source_path_str) / "input"
camera_folder = Path(self.source_path_str) / "camera"
## Feature extraction
feature_extractor_cmd = "colmap feature_extractor" + \
f" --database_path {str(database_path)}" + \
f" --image_path {str(image_distorted_folder)}" + \
f" --ImageReader.single_camera 1" + \
f" --ImageReader.camera_model {self.cfg.camera}" + \
f" --SiftExtraction.use_gpu {int(self.cfg.use_gpu)}"
exit_code = self.run_command_with_realtime_output(feature_extractor_cmd)
if exit_code != 0:
self.logger.error(f"Feature extraction failed with code {exit_code}. Exiting.")
exit(exit_code)
self.logger.info("Finish feature extraction...")
## Create points3D.txt
points3D_txt_path = project_folder / 'points3D.txt'
open(str(points3D_txt_path), 'w').close()
## Create camera.txt
camera_txt_path = project_folder / 'cameras.txt'
open(str(camera_txt_path), 'w').close()
unique_cameras = {}
camera_id = 1
for camera_file in camera_folder.glob('*.json'):
camera_data = load_json(camera_file)
intrinsics = camera_data['intrinsics'] | focal_length = fov_to_focal_length(intrinsics['fov'], intrinsics['width']) | 3 | 2023-12-22 08:27:26+00:00 | 2k |
ddjerqq/beam | src/util.py | [
{
"identifier": "User",
"path": "src/types/user.py",
"snippet": "class User:\n id: int\n username: str\n avatar_url: str"
},
{
"identifier": "Video",
"path": "src/types/video.py",
"snippet": "class Video:\n \"\"\"Tiktok video object\"\"\"\n\n id: str\n \"\"\"Unique identifier for the TikTok video. Also called \"item_id\"\"\"\n\n create_time: int\n \"\"\"UTC Unix epoch (in seconds) of when the TikTok video was posted.\"\"\"\n\n cover_image_url: str\n \"\"\"A CDN link for the video's cover image. The image is static. Due to our trust and safety policies, the link has a TTL of 6 hours.\"\"\"\n\n share_url: str\n \"\"\"A shareable link for this TikTok video. Note that the website behaves differently on Mobile and Desktop devices.\"\"\"\n\n video_description: str\n \"\"\"The description that the creator has set for the TikTok video. Max length: 150\"\"\"\n\n duration: int\n \"\"\"The duration of the TikTok video in seconds.\"\"\"\n\n height: int\n \"\"\"The height of the TikTok video.\"\"\"\n\n width: int\n \"\"\"The width of the TikTok video.\"\"\"\n\n title: str\n \"\"\"The video title. Max length: 150\"\"\"\n\n embed_html: str\n \"\"\"HTML code for embedded video\"\"\"\n\n embed_link: str\n \"\"\"Video embed link of tiktok.com\"\"\"\n\n like_count: int\n \"\"\"Number of likes for the video\"\"\"\n\n comment_count: int\n \"\"\"Number of comments on the video\"\"\"\n\n share_count: int\n \"\"\"Number of shares of the video\"\"\"\n\n view_count: int\n \"\"\"Number of views of the video\"\"\"\n\n @property\n def create_timestamp(self) -> datetime.datetime:\n return datetime.datetime.fromtimestamp(self.create_time, tz=datetime.UTC)"
}
] | import os
import httpx
from src.types.user import User
from src.types.video import Video | 661 |
def get_env(key: str, default: str = None) -> str:
"""
    gets the environment variable with the given key,
    or raises an exception if it is unset and no default is supplied.
    """
    var = os.getenv(key, default)
if var is not None:
return var
raise Exception(f"Environment variable {key} not found.")
def humanize(num: int) -> str:
"""
converts a number to a human readable format.
"""
if num < 1000:
return str(num)
num = num / 1000
if num < 1000:
return f"{num:.1f}k"
num = num / 1000
if num < 1000:
return f"{num:.1f}m"
num = num / 1000
return f"{num:.1f}b"
|
def get_env(key: str, default: str = None) -> str:
"""
    gets the environment variable with the given key,
    or raises an exception if it is unset and no default is supplied.
    """
    var = os.getenv(key, default)
if var is not None:
return var
raise Exception(f"Environment variable {key} not found.")
def humanize(num: int) -> str:
"""
converts a number to a human readable format.
"""
if num < 1000:
return str(num)
num = num / 1000
if num < 1000:
return f"{num:.1f}k"
num = num / 1000
if num < 1000:
return f"{num:.1f}m"
num = num / 1000
return f"{num:.1f}b"
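
# Illustrative behavior of humanize above (these examples are not part of the
# original file): each step divides by 1000.
#   humanize(999)           -> "999"
#   humanize(1_234)         -> "1.2k"
#   humanize(5_600_000)     -> "5.6m"
#   humanize(7_200_000_000) -> "7.2b"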
| def video_info_to_webhook_payload(author: User, video: Video) -> dict[str, str]: | 1 | 2023-12-28 23:18:25+00:00 | 2k |
onestepai/api_rag | service.py | [
{
"identifier": "ServiceApiConfig",
"path": "src/config/ServiceApiConfig.py",
"snippet": "class ServiceApiConfig(ServiceApiConfigBase):\n def __init__(self):\n ServiceApiConfigBase.__init__(self,\n url_prefix=DockerConfig.URL_PREFIX + DockerConfig.API_VERSION,\n\n version=DockerConfig.API_VERSION,\n title=DockerConfig.API_TITLE,\n description=DockerConfig.API_DESCRIPTION,\n gpt_api_key= DockerConfig.GPT_API_KEY,\n gpt_4_model= DockerConfig.GPT_API_VERSION_4,\n gpt_3_5_model=DockerConfig.GPT_API_VERSION_35,\n prompt_language=DockerConfig.PROMPT_LANGUAGE\n )\n self.__set_predict_request()\n self.__set_predict_response()\n\n def __set_predict_request(self):\n request = ServiceApiConfigBase.api.model('PredictRequest.extractResult', {\n 'utterance': fields.String(description='content'),\n 'model_name': fields.String(description='model name'),\n 'language': fields.String(description='language')\n })\n predict_request = ServiceApiConfigBase.api.model('PredictRequest', {\n 'requestId': fields.String(description='request id'),\n 'request': fields.Nested(request, description='request'),\n 'timestamp': fields.Integer(description='calling timestamp')\n })\n ServiceApiConfigBase.predict_request = predict_request\n\n def __set_predict_response(self):\n response_result = ServiceApiConfigBase.api.model('PredictResponse.responseResult', {\n 'result': fields.String(description='result'),\n 'content': fields.String(description='content')\n })\n predict_response = ServiceApiConfigBase.api.model('PredictResponse', {\n 'requestId': fields.String(description='request id'),\n 'responseResult': fields.Nested(response_result, description='responseResult'),\n 'timestamp': fields.Integer(description='calling timestamp')\n })\n ServiceApiConfigBase.predict_response = predict_response"
},
{
"identifier": "DockerConfig",
"path": "src/config/DockerConfig.py",
"snippet": "class DockerConfig(object):\n GPT_API_KEY = MyEnvironment().get_environment_variable(\"OPENAPI_API_KEY\", 'Your open ai key')\n API_VERSION = MyEnvironment().get_environment_variable(\"API_VERSION\", '1.0')\n GPT_API_VERSION_35 = MyEnvironment().get_environment_variable(\"GPT_3.5\", 'gpt-3.5-turbo-1106')\n GPT_API_VERSION_4 = MyEnvironment().get_environment_variable(\"GPT_4\", 'gpt-4-1106-preview')\n URL_PREFIX = MyEnvironment().get_environment_variable(\"URL_PREFIX\", '/api_rag/')\n SERVICE_PORT = MyEnvironment().get_environment_variable(\"PORT\", '5000')\n API_TITLE = MyEnvironment().get_environment_variable(\"API_TITLE\", 'API RAG Service')\n API_DESCRIPTION = MyEnvironment().get_environment_variable(\"API_DESCRIPTION\", 'API RAG Service')\n PROMPT_LANGUAGE = MyEnvironment().get_environment_variable(\"PROMPT_LANGUAGE\", \"zh_cn\")"
},
{
"identifier": "ModelHandler",
"path": "src/api_rag/ModelHandler.py",
"snippet": "class ModelHandler(ModelBaseHandler):\n V1 = \"v1\"\n\n def __init__(self, config):\n ModelBaseHandler.__init__(self, config)\n self._version = ModelHandler.V1\n self.create_model()\n\n\n def create_model(self):\n if self._version == ModelHandler.V1:\n self._predictor = APIRAGModel()\n\n def predict(self, request, **kwargs):\n # try:\n LoggerHelper().log_info(u\"Request: \" + str(request))\n contents = request[\"request\"][\"content\"]\n data = json.loads(contents)\n if \"clean_context\" in list(data.keys()):\n final_result = \"Reset successfully.\"\n else:\n text = data[\"utterance\"]\n model_name = data[\"model_name\"]\n LoggerHelper().log_info(u\"date_text_content: \" + str(text))\n final_result = self._predictor.predict(text,model_name)\n response_predict = self.create_predict_response(request,final_result)\n if response_predict is not None:\n return response_predict\n\n\n def create_predict_response(self, request, predict_sent):\n response = {\n 'requestId': request['requestId'] if 'requestId' in request else '',\n 'timestamp': time.time(),\n 'response': predict_sent\n }\n return {\n 'requestId': request['requestId'] if 'requestId' in request else '',\n 'timestamp': time.time(),\n 'responseResult': {\n 'result': 'success',\n 'content': json.dumps(response, ensure_ascii=False)\n }\n }"
}
] | import logging
from src.config.ServiceApiConfig import ServiceApiConfig
from src.config.DockerConfig import DockerConfig
from src.api_rag.ModelHandler import ModelHandler | 1,153 |
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
if __name__ == '__main__':
|
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
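# The two CRITICAL overrides above mute boto3/botocore chatter while the root
# logger stays at INFO; e.g. logging.getLogger('boto3').isEnabledFor(logging.INFO)
# now returns False (illustrative check, not in the original file).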
if __name__ == '__main__': | config = ServiceApiConfig() | 0 | 2023-12-28 03:13:03+00:00 | 2k |
DerwenAI/textgraphs | textgraphs/graph.py | [
{
"identifier": "Edge",
"path": "textgraphs/elem.py",
"snippet": "class Edge:\n \"\"\"\nA data class representing an edge between two nodes.\n \"\"\"\n src_node: int\n dst_node: int\n kind: RelEnum\n rel: str\n prob: float\n count: int = 1"
},
{
"identifier": "Node",
"path": "textgraphs/elem.py",
"snippet": "class Node: # pylint: disable=R0902\n \"\"\"\nA data class representing one node, i.e., an extracted phrase.\n \"\"\"\n node_id: int\n key: str\n span: typing.Union[ spacy.tokens.span.Span, spacy.tokens.token.Token ]\n text: str\n pos: str\n kind: NodeEnum\n loc: typing.List[ typing.List[ int ] ] = field(default_factory = lambda: [])\n label: typing.Optional[ str ] = None\n length: int = 1\n sub_obj: bool = False\n count: int = 0\n neighbors: int = 0\n weight: float = 0.0\n entity: typing.List[ LinkedEntity ] = field(default_factory = lambda: [])\n annotated: bool = False\n\n\n def get_linked_label (\n self\n ) -> typing.Optional[ str ]:\n \"\"\"\nWhen this node has a linked entity, return that IRI.\nOtherwise return its `label` value.\n\n returns:\na label for the linked entity\n \"\"\"\n if len(self.entity) > 0:\n return self.entity[0].iri\n\n return self.label\n\n\n def get_name (\n self\n ) -> str:\n \"\"\"\nReturn a brief name for the graphical depiction of this Node.\n\n returns:\nbrief label to be used in a graph\n \"\"\"\n if self.kind == NodeEnum.IRI:\n return self.label # type: ignore\n if self.kind == NodeEnum.LEM:\n return self.key\n\n return self.text\n\n\n def get_stacked_count (\n self\n ) -> int:\n \"\"\"\nReturn a modified count, to redact verbs and linked entities from\nthe stack-rank partitions.\n\n returns:\ncount, used for re-ranking extracted entities\n \"\"\"\n if self.pos == \"VERB\" or self.kind == NodeEnum.IRI:\n return 0\n\n return self.count\n\n\n def get_pos (\n self\n ) -> typing.Tuple[ int, int ]:\n \"\"\"\nGenerate a position span for `OpenNRE`.\n\n returns:\na position span needed for `OpenNRE` relation extraction\n \"\"\"\n position: typing.Tuple[ int, int ] = ( self.span.idx, self.span.idx + len(self.text) - 1, )\n return position"
},
{
"identifier": "NodeEnum",
"path": "textgraphs/elem.py",
"snippet": "class NodeEnum (enum.IntEnum):\n \"\"\"\nEnumeration for the kinds of node categories\n \"\"\"\n DEP = 0 # `spaCy` parse dependency\n LEM = 1 # lemmatized token\n ENT = 2 # named entity\n CHU = 3 # noun chunk\n IRI = 4 # IRI for linked entity\n\n def __str__ (\n self\n ) -> str:\n \"\"\"\nCodec for representing as a string.\n\n returns:\ndecoded string representation of the enumerated value\n \"\"\"\n decoder: typing.List[ str ] = [\n \"dep\",\n \"lem\",\n \"ent\",\n \"chu\",\n \"iri\",\n ]\n\n return decoder[self.value]"
},
{
"identifier": "RelEnum",
"path": "textgraphs/elem.py",
"snippet": "class RelEnum (enum.IntEnum):\n \"\"\"\nEnumeration for the kinds of edge relations\n \"\"\"\n DEP = 0 # `spaCy` parse dependency\n CHU = 1 # `spaCy` noun chunk\n INF = 2 # `REBEL` or `OpenNRE` inferred relation\n SYN = 3 # `sense2vec` inferred synonym\n IRI = 4 # `DBPedia` or `Wikidata` linked entity\n\n def __str__ (\n self\n ) -> str:\n \"\"\"\nCodec for representing as a string.\n\n returns:\ndecoded string representation of the enumerated value\n \"\"\"\n decoder: typing.List[ str ] = [\n \"dep\",\n \"inf\",\n \"syn\",\n \"chu\",\n \"iri\",\n ]\n\n return decoder[self.value]"
}
] | from collections import OrderedDict
from icecream import ic # pylint: disable=E0401
from .elem import Edge, Node, NodeEnum, RelEnum
import json
import typing
import networkx as nx # pylint: disable=E0401
import spacy # pylint: disable=E0401 | 1,287 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This class implements a generic, in-memory graph data structure used
to represent the _lemma graph_.
see copyright/license https://huggingface.co/spaces/DerwenAI/textgraphs/blob/main/README.md
"""
######################################################################
## class definitions
class SimpleGraph:
"""
An in-memory graph used to build a `MultiDiGraph` in NetworkX.
"""
def __init__ (
self
) -> None:
"""
Constructor.
"""
self.nodes: typing.Dict[ str, Node ] = OrderedDict()
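        # An OrderedDict keeps node insertion order stable, so iterating
        # self.nodes replays extracted phrases in first-seen order.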
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This class implements a generic, in-memory graph data structure used
to represent the _lemma graph_.
see copyright/license https://huggingface.co/spaces/DerwenAI/textgraphs/blob/main/README.md
"""
######################################################################
## class definitions
class SimpleGraph:
"""
An in-memory graph used to build a `MultiDiGraph` in NetworkX.
"""
def __init__ (
self
) -> None:
"""
Constructor.
"""
self.nodes: typing.Dict[ str, Node ] = OrderedDict() | self.edges: typing.Dict[ str, Edge ] = {} | 0 | 2023-12-25 11:42:53+00:00 | 2k |
Noubissie237/StockManagment | StockManagment/App/views.py | [
{
"identifier": "panier_cookie",
"path": "StockManagment/App/utils.py",
"snippet": "def panier_cookie(request):\n articles = []\n\n commande = {\n 'get_panier_total':0,\n 'get_panier_article':0,\n 'produit_physique': True,\n }\n\n nombre_article = commande['get_panier_article']\n\n try:\n panier = json.loads(request.COOKIES.get('panier'))\n for obj in panier:\n\n nombre_article += panier[obj]['qte']\n\n produit = Produit.objects.get(id=obj)\n\n total = produit.price * panier[obj]['qte']\n\n commande['get_panier_article'] += panier[obj]['qte']\n\n commande['get_panier_total'] += total\n\n article = {\n 'produit': {\n 'pk': produit.id,\n 'name': produit.name,\n 'price': produit.price,\n 'nombre': produit.nombre\n },\n 'quantite': panier[obj]['qte'],\n 'get_total': total\n\n }\n\n articles.append(article)\n\n if produit.digital == False:\n commande['produit_physique'] = True\n \n except:\n pass\n\n context = {\n 'articles' : articles, \n 'commande': commande,\n 'nombre_article': nombre_article\n }\n\n return context"
},
{
"identifier": "data_cookie",
"path": "StockManagment/App/utils.py",
"snippet": "def data_cookie(request):\n\n if request.user.is_authenticated:\n\n client = request.user.client\n\n commande, created = Commande.objects.get_or_create(client=client, complete=False)\n\n articles = commande.commandearticle_set.all()\n\n nombre_article = commande.get_panier_article\n\n else:\n\n cookie_panier = panier_cookie(request)\n articles = cookie_panier['articles']\n commande = cookie_panier['commande']\n nombre_article = cookie_panier['nombre_article']\n\n context = {\n 'articles' : articles, \n 'commande': commande,\n 'nombre_article': nombre_article\n }\n\n return context"
},
{
"identifier": "getDataFromApi",
"path": "StockManagment/App/utils.py",
"snippet": "def getDataFromApi(request):\n try:\n url = \"http://localhost:8000/api/prescriptions/\"\n\n response = requests.get(url)\n \n dataToSave = response.json()\n\n for elt in dataToSave:\n\n if not User.objects.filter(username=elt['email']).exists():\n\n user = User.objects.create_user(username=elt['email'], email=elt['email'], password=elt['Token'])\n\n user.save()\n\n \n if Prescription.objects.filter(email=elt['email']).exists():\n pass\n else:\n tmp = Prescription(nom=elt['nom'], prenom=elt['prenom'], age=elt['age'], sexe=elt['sexe'], email=elt['email'],\n antecedent=elt['antecedent'], prescription1=elt['prescription1'], prescription2=elt['prescription2'], \n prescription3=elt['prescription3'])\n tmp.save()\n\n try:\n user = User.objects.get(username=elt['email'])\n\n client = Client.objects.create(user=user, name=elt[\"nom\"], email=elt['email'])\n\n print(\"valid\")\n\n except:\n print('invalid')\n\n return \"SUCCESS\"\n \n except:\n return \"FAILED\""
},
{
"identifier": "LoginForm",
"path": "StockManagment/App/forms.py",
"snippet": "class LoginForm(forms.Form):\n username = forms.CharField(label='Nom d\\'utilisateur', widget=forms.TextInput(attrs={'class': 'form-control'}))\n password = forms.CharField(label='Mot de passe', widget=PasswordInputWithClass())"
}
] | from django.shortcuts import render, redirect
from django.http import JsonResponse, HttpResponse
from .models import *
from django.contrib.auth.decorators import login_required
from datetime import datetime
from .utils import panier_cookie, data_cookie, getDataFromApi
from .forms import LoginForm
from django.contrib.auth import authenticate, login, logout
import json, requests | 1,475 |
@login_required(login_url='/login')
def shop(request, *args, **kwargs):
"""Vue des produits"""
produits = Produit.objects.all()
data = data_cookie(request)
articles = data['articles']
commande = data['commande']
nombre_article = data['nombre_article']
context = {
'produits': produits,
'nombre_article': nombre_article
}
return render(request, 'shop/index.html', context)
@login_required(login_url='/login')
def panier(request, *args, **kwargs):
data = data_cookie(request)
articles = data['articles']
commande = data['commande']
nombre_article = data['nombre_article']
context = {
'articles' : articles,
'commande': commande,
'nombre_article': nombre_article
}
return render(request, 'shop/panier.html', context)
@login_required(login_url='/login')
def commande(request, *args, **kwargs):
data = data_cookie(request)
articles = data['articles']
commande = data['commande']
nombre_article = data['nombre_article']
context = {
'articles' : articles,
'commande': commande,
'nombre_article': nombre_article
}
return render(request, 'shop/commande.html', context)
@login_required(login_url='/login')
def update_article(request, *args, **kwargs):
data = json.loads(request.body)
produit_id = data['produit_id']
action = data['action']
produit = Produit.objects.get(id=produit_id)
client = request.user.client
commande, created = Commande.objects.get_or_create(client=client, complete=False)
commande_article, created = CommandeArticle.objects.get_or_create(commande=commande, produit=produit)
if action == "add":
commande_article.quantite += 1
if action == "remove":
commande_article.quantite -=1
commande_article.save()
if commande_article.quantite <= 0:
commande_article.delete()
    return JsonResponse("cart modified", safe=False)
@login_required(login_url='/login')
def commandeAnonyme(request, data):
name = data['form']['name']
username = data['form']['username']
email = data['form']['email']
phone = data['form']['phone']
|
@login_required(login_url='/login')
def shop(request, *args, **kwargs):
    """Products view."""
produits = Produit.objects.all()
data = data_cookie(request)
articles = data['articles']
commande = data['commande']
nombre_article = data['nombre_article']
context = {
'produits': produits,
'nombre_article': nombre_article
}
return render(request, 'shop/index.html', context)
@login_required(login_url='/login')
def panier(request, *args, **kwargs):
data = data_cookie(request)
articles = data['articles']
commande = data['commande']
nombre_article = data['nombre_article']
context = {
'articles' : articles,
'commande': commande,
'nombre_article': nombre_article
}
return render(request, 'shop/panier.html', context)
@login_required(login_url='/login')
def commande(request, *args, **kwargs):
data = data_cookie(request)
articles = data['articles']
commande = data['commande']
nombre_article = data['nombre_article']
context = {
'articles' : articles,
'commande': commande,
'nombre_article': nombre_article
}
return render(request, 'shop/commande.html', context)
@login_required(login_url='/login')
def update_article(request, *args, **kwargs):
data = json.loads(request.body)
produit_id = data['produit_id']
action = data['action']
produit = Produit.objects.get(id=produit_id)
client = request.user.client
commande, created = Commande.objects.get_or_create(client=client, complete=False)
commande_article, created = CommandeArticle.objects.get_or_create(commande=commande, produit=produit)
if action == "add":
commande_article.quantite += 1
if action == "remove":
commande_article.quantite -=1
commande_article.save()
if commande_article.quantite <= 0:
commande_article.delete()
    return JsonResponse("cart modified", safe=False)
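
# Example request body handled by update_article above (illustrative values,
# not part of the original file):
#   {"produit_id": 3, "action": "add"}    -> increments the cart line by one
#   {"produit_id": 3, "action": "remove"} -> decrements it, deleting at zero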
@login_required(login_url='/login')
def commandeAnonyme(request, data):
name = data['form']['name']
username = data['form']['username']
email = data['form']['email']
phone = data['form']['phone']
| cookie_panier = panier_cookie(request) | 0 | 2023-12-29 11:13:34+00:00 | 2k |
kokiez/raydium-convert-SOLorTokens | main.py | [
{
"identifier": "fetch_pool_keys",
"path": "pools.py",
"snippet": "def fetch_pool_keys(mint: str):\r\n amm_info = {}\r\n all_pools = {}\r\n try:\r\n # Using this so it will be faster else no option, we go the slower way.\r\n with open('all_pools.json', 'r') as file:\r\n all_pools = json.load(file)\r\n amm_info = extract_pool_info(all_pools, mint)\r\n except:\r\n resp = requests.get('https://api.raydium.io/v2/sdk/liquidity/mainnet.json', stream=True)\r\n pools = resp.json()\r\n official = pools['official']\r\n unofficial = pools['unOfficial'] \r\n all_pools = official + unofficial\r\n\r\n # Store all_pools in a JSON file\r\n with open('all_pools.json', 'w') as file:\r\n json.dump(all_pools, file, default=lambda x: x.__dict__)\r\n amm_info = extract_pool_info(all_pools, mint)\r\n\r\n return {\r\n 'amm_id': Pubkey.from_string(amm_info['id']),\r\n 'authority': Pubkey.from_string(amm_info['authority']),\r\n 'base_mint': Pubkey.from_string(amm_info['baseMint']),\r\n 'base_decimals': amm_info['baseDecimals'],\r\n 'quote_mint': Pubkey.from_string(amm_info['quoteMint']),\r\n 'quote_decimals': amm_info['quoteDecimals'],\r\n 'lp_mint': Pubkey.from_string(amm_info['lpMint']),\r\n 'open_orders': Pubkey.from_string(amm_info['openOrders']),\r\n 'target_orders': Pubkey.from_string(amm_info['targetOrders']),\r\n 'base_vault': Pubkey.from_string(amm_info['baseVault']),\r\n 'quote_vault': Pubkey.from_string(amm_info['quoteVault']),\r\n 'market_id': Pubkey.from_string(amm_info['marketId']),\r\n 'market_base_vault': Pubkey.from_string(amm_info['marketBaseVault']),\r\n 'market_quote_vault': Pubkey.from_string(amm_info['marketQuoteVault']),\r\n 'market_authority': Pubkey.from_string(amm_info['marketAuthority']),\r\n 'bids': Pubkey.from_string(amm_info['marketBids']),\r\n 'asks': Pubkey.from_string(amm_info['marketAsks']),\r\n 'event_queue': Pubkey.from_string(amm_info['marketEventQueue'])\r\n }\r"
},
{
"identifier": "make_simulate_pool_info_instruction",
"path": "pools.py",
"snippet": "def make_simulate_pool_info_instruction(accounts):\r\n\r\n keys = [\r\n AccountMeta(pubkey=accounts[\"amm_id\"], is_signer=False, is_writable=False),\r\n AccountMeta(pubkey=accounts[\"authority\"], is_signer=False, is_writable=False),\r\n AccountMeta(pubkey=accounts[\"open_orders\"], is_signer=False, is_writable=False),\r\n AccountMeta(pubkey=accounts[\"base_vault\"], is_signer=False, is_writable=False),\r\n AccountMeta(pubkey=accounts[\"quote_vault\"], is_signer=False, is_writable=False),\r\n AccountMeta(pubkey=accounts[\"lp_mint\"], is_signer=False, is_writable=False),\r\n AccountMeta(pubkey=accounts[\"market_id\"], is_signer=False, is_writable=False), \r\n AccountMeta(pubkey=accounts['event_queue'], is_signer=False, is_writable=False), \r\n \r\n \r\n ]\r\n data = POOL_INFO_LAYOUT.build(\r\n dict(\r\n instruction=12,\r\n simulate_type=0\r\n )\r\n )\r\n return Instruction(AMM_PROGRAM_ID, data, keys)\r"
}
] | from solana.rpc.commitment import Commitment
from solana.rpc.api import Client
from solana.transaction import Transaction
from solders.keypair import Keypair
from pools import fetch_pool_keys, make_simulate_pool_info_instruction
from ast import literal_eval
import re
| 1,536 |
LIQUIDITY_FEES_NUMERATOR = 25
LIQUIDITY_FEES_DENOMINATOR = 10000
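# Raydium LP fee applied below: 25 / 10000 = 0.25% of amount_in.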
"""
Required Variables
"""
endpoint = "your_rpc_url"
payer = Keypair.from_base58_string("your_private_key")
token = "ca of your mint/mint address"
solana_client = Client(endpoint, commitment=Commitment("confirmed"), blockhash_cache=True)
def calculateAmountOut(amount, pool_info):
status = pool_info['status']
SWAP_decimals = pool_info['coin_decimals'] #swap coin
SOL_decimals = pool_info['pc_decimals'] #SOL
COIN_lp_decimals = pool_info['lp_decimals'] #swap coin
pool_SOL_amount = pool_info['pool_pc_amount'] #sol
pool_SWAP_amount = pool_info['pool_coin_amount'] #coin
Coin_pool_lp_supply = pool_info['pool_lp_supply'] #coin
reserve_in = pool_SOL_amount
reserve_out = pool_SWAP_amount
current_price = reserve_out / reserve_in
# print(f"Current Price in SOL: {current_price:.12f}")
amount_in = amount * 10 ** SOL_decimals
Fees = (amount_in * LIQUIDITY_FEES_NUMERATOR)/LIQUIDITY_FEES_DENOMINATOR
amount_in_with_fee = amount_in - Fees
amountOutRaw = (reserve_out * amount_in_with_fee) / (reserve_in + amount_in_with_fee)
# Slippage = 1 + slippage
# minimumAmountOut = amountOutRaw / slippage
return amountOutRaw / 10 ** SWAP_decimals
def calculateAmountIn(amount, pool_info):
SWAP_decimals = pool_info['coin_decimals'] #swap coin
SOL_decimals = pool_info['pc_decimals'] #SOL
COIN_lp_decimals = pool_info['lp_decimals'] #swap coin
pool_SOL_amount = pool_info['pool_pc_amount'] #sol
pool_SWAP_amount = pool_info['pool_coin_amount'] #coin
Coin_pool_lp_supply = pool_info['pool_lp_supply'] #coin
reserve_in = pool_SWAP_amount
reserve_out = pool_SOL_amount
current_price = reserve_out / reserve_in
# print(f"Current Price in SOL: {current_price:.12f}")
amount_in = amount * 10 ** SWAP_decimals
Fees = (amount_in * LIQUIDITY_FEES_NUMERATOR)/LIQUIDITY_FEES_DENOMINATOR
amount_in_with_fee = amount_in - Fees
amountOutRaw = (reserve_out * amount_in_with_fee) / (reserve_in + amount_in_with_fee)
# Slippage = 1 + slippage
# minimumAmountOut = amountOutRaw / slippage
return amountOutRaw / 10 ** SOL_decimals
def PoolInfo(mint):
while True:
quote = ""
|
LIQUIDITY_FEES_NUMERATOR = 25
LIQUIDITY_FEES_DENOMINATOR = 10000
"""
Required Variables
"""
endpoint = "your_rpc_url"
payer = Keypair.from_base58_string("your_private_key")
token = "ca of your mint/mint address"
solana_client = Client(endpoint, commitment=Commitment("confirmed"), blockhash_cache=True)
def calculateAmountOut(amount, pool_info):
status = pool_info['status']
SWAP_decimals = pool_info['coin_decimals'] #swap coin
SOL_decimals = pool_info['pc_decimals'] #SOL
COIN_lp_decimals = pool_info['lp_decimals'] #swap coin
pool_SOL_amount = pool_info['pool_pc_amount'] #sol
pool_SWAP_amount = pool_info['pool_coin_amount'] #coin
Coin_pool_lp_supply = pool_info['pool_lp_supply'] #coin
reserve_in = pool_SOL_amount
reserve_out = pool_SWAP_amount
current_price = reserve_out / reserve_in
# print(f"Current Price in SOL: {current_price:.12f}")
amount_in = amount * 10 ** SOL_decimals
Fees = (amount_in * LIQUIDITY_FEES_NUMERATOR)/LIQUIDITY_FEES_DENOMINATOR
amount_in_with_fee = amount_in - Fees
amountOutRaw = (reserve_out * amount_in_with_fee) / (reserve_in + amount_in_with_fee)
# Slippage = 1 + slippage
# minimumAmountOut = amountOutRaw / slippage
return amountOutRaw / 10 ** SWAP_decimals
def calculateAmountIn(amount, pool_info):
SWAP_decimals = pool_info['coin_decimals'] #swap coin
SOL_decimals = pool_info['pc_decimals'] #SOL
COIN_lp_decimals = pool_info['lp_decimals'] #swap coin
pool_SOL_amount = pool_info['pool_pc_amount'] #sol
pool_SWAP_amount = pool_info['pool_coin_amount'] #coin
Coin_pool_lp_supply = pool_info['pool_lp_supply'] #coin
reserve_in = pool_SWAP_amount
reserve_out = pool_SOL_amount
current_price = reserve_out / reserve_in
# print(f"Current Price in SOL: {current_price:.12f}")
amount_in = amount * 10 ** SWAP_decimals
Fees = (amount_in * LIQUIDITY_FEES_NUMERATOR)/LIQUIDITY_FEES_DENOMINATOR
amount_in_with_fee = amount_in - Fees
amountOutRaw = (reserve_out * amount_in_with_fee) / (reserve_in + amount_in_with_fee)
# Slippage = 1 + slippage
# minimumAmountOut = amountOutRaw / slippage
return amountOutRaw / 10 ** SOL_decimals
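
# Worked example of calculateAmountOut above (illustrative numbers, not from
# the original file): with reserve_in = 100 SOL, reserve_out = 1_000_000
# tokens and the 0.25% fee, swapping 1 SOL gives
#   amount_in_with_fee = 1 - 0.0025 = 0.9975
#   amount_out = 1_000_000 * 0.9975 / (100 + 0.9975) ~= 9_876 tokens,
# slightly under the 10_000-token spot quote due to fee plus price impact.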
def PoolInfo(mint):
while True:
quote = ""
| pool_keys = fetch_pool_keys(mint)
| 0 | 2023-12-29 12:35:38+00:00 | 2k |
proger/nanokitchen | blockdiag_linear.py | [
{
"identifier": "StructuredLinear",
"path": "structured_linear.py",
"snippet": "class StructuredLinear(nn.Module):\n\n def __init__(self, in_features, out_features, bias=True, device=None, dtype=None):\n \"\"\"Subclasses should call reset_parameters\n \"\"\"\n factory_kwargs = {'device': device, 'dtype': dtype}\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n # Subclasses may override {in,out}_features_extended\n if not hasattr(self, 'in_features_extended'):\n self.in_features_extended = in_features\n if not hasattr(self, 'out_features_extended'):\n self.out_features_extended = out_features\n if bias:\n self.bias = nn.Parameter(torch.zeros(out_features, **factory_kwargs))\n else:\n self.register_parameter('bias', None)\n\n def reset_parameters(self) -> None:\n self.set_weights_from_dense_init(dense_init_fn_=partial(init.kaiming_uniform_, a=math.sqrt(5)))\n self.reset_parameters_bias()\n\n def set_weights_from_dense_init(self, dense_init_fn_):\n raise NotImplementedError\n\n def reset_parameters_bias(self):\n if self.bias is not None:\n fan_in = self.bias.shape[-1]\n bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0\n init.uniform_(self.bias, -bound, bound)\n\n @property\n def saving(self):\n raise NotImplementedError\n\n def convert_to_dense_weight(self):\n factory_kwargs = {'device': self.weight.device, 'dtype': self.weight.dtype}\n dense_weight = self.forward_matmul(torch.eye(self.in_features, **factory_kwargs)).T\n return dense_weight\n\n def preprocess(self, x):\n in_features = x.shape[-1]\n if in_features < self.in_features_extended:\n x = F.pad(x, (0, self.in_features_extended - in_features))\n return x\n\n def postprocess(self, output):\n out_features_extended = output.shape[-1]\n if out_features_extended > self.out_features:\n output = output[..., :self.out_features]\n return output\n\n def forward_matmul(self, x):\n raise NotImplementedError\n\n def forward(self, x):\n output = self.forward_matmul(x)\n # Convert bias to output.dtype in case of AMP, otherwise bias and activation will be in FP32\n return (output + self.bias.to(dtype=output.dtype)) if self.bias is not None else output"
},
{
"identifier": "blockdiag_multiply",
"path": "blockdiag_multiply.py",
"snippet": "def blockdiag_weight_to_dense_weight(weight):\ndef blockdiag_multiply_reference(x, weight):\n def forward(ctx, x, weight):\n def backward(ctx, dout):\nclass BlockdiagMultiply(torch.autograd.Function):"
}
] | import math
import torch
import torch.nn as nn
from einops import rearrange
from structured_linear import StructuredLinear
from blockdiag_multiply import blockdiag_multiply | 1,073 | # Adapted from https://github.com/HazyResearch/fly/tree/master/src/models/layers
class BlockdiagLinear(StructuredLinear):
def __init__(self, *args, nblocks=4, shuffle=False, **kwargs):
"""shuffle: apply channel_shuffle operation before the matmul as in ShuffleNet
"""
super().__init__(*args, **kwargs)
in_blksz = int(math.ceil(self.in_features / nblocks))
out_blksz = int(math.ceil(self.out_features / nblocks))
self.in_features_extended = in_blksz * nblocks
self.out_features_extended = out_blksz * nblocks
self.shuffle = shuffle
self.weight = nn.Parameter(torch.empty(nblocks, out_blksz, in_blksz))
self.reset_parameters()
def set_weights_from_dense_init(self, dense_init_fn_):
dense_weight = torch.empty(self.out_features_extended, self.in_features_extended,
device=self.weight.device, dtype=self.weight.dtype)
dense_init_fn_(dense_weight)
# Scale by sqrt because the weight is sparse
scaling = math.sqrt(dense_weight.numel() / self.weight.numel())
dense_weight *= scaling
with torch.no_grad():
nblocks = self.weight.shape[0]
self.weight.copy_(rearrange(dense_weight, '(b o) (b1 i) -> b b1 o i',
b=nblocks, b1=nblocks)[0])
@property
def saving(self):
return self.weight.numel() / (self.in_features * self.out_features)
def forward_matmul(self, x):
x = self.preprocess(x)
if self.shuffle:
x = rearrange(x, '... (group c_per_group) -> ... (c_per_group group)',
group=self.weight.shape[0]) # group=nblocks
| # Adapted from https://github.com/HazyResearch/fly/tree/master/src/models/layers
class BlockdiagLinear(StructuredLinear):
def __init__(self, *args, nblocks=4, shuffle=False, **kwargs):
"""shuffle: apply channel_shuffle operation before the matmul as in ShuffleNet
"""
super().__init__(*args, **kwargs)
in_blksz = int(math.ceil(self.in_features / nblocks))
out_blksz = int(math.ceil(self.out_features / nblocks))
self.in_features_extended = in_blksz * nblocks
self.out_features_extended = out_blksz * nblocks
self.shuffle = shuffle
self.weight = nn.Parameter(torch.empty(nblocks, out_blksz, in_blksz))
self.reset_parameters()
def set_weights_from_dense_init(self, dense_init_fn_):
dense_weight = torch.empty(self.out_features_extended, self.in_features_extended,
device=self.weight.device, dtype=self.weight.dtype)
dense_init_fn_(dense_weight)
# Scale by sqrt because the weight is sparse
scaling = math.sqrt(dense_weight.numel() / self.weight.numel())
dense_weight *= scaling
with torch.no_grad():
nblocks = self.weight.shape[0]
self.weight.copy_(rearrange(dense_weight, '(b o) (b1 i) -> b b1 o i',
b=nblocks, b1=nblocks)[0])
@property
def saving(self):
return self.weight.numel() / (self.in_features * self.out_features)
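
    # Illustrative parameter count (not in the original file): with
    # in_features = out_features = 1024 and nblocks = 4, each block is
    # 256 x 256, so weight.numel() = 4 * 256 * 256 = 262_144 versus
    # 1024 * 1024 = 1_048_576 dense parameters, i.e. saving = 0.25.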
def forward_matmul(self, x):
x = self.preprocess(x)
if self.shuffle:
x = rearrange(x, '... (group c_per_group) -> ... (c_per_group group)',
group=self.weight.shape[0]) # group=nblocks | output = blockdiag_multiply(x, self.weight) | 1 | 2023-12-27 12:13:00+00:00 | 2k |
karloskar/homeassistant-goecontroller-mqtt | custom_components/goecontroller_mqtt/switch.py | [
{
"identifier": "SWITCHES",
"path": "custom_components/goecontroller_mqtt/definitions/switch.py",
"snippet": "SWITCHES: tuple[GoEControllerSwitchEntityDescription, ...] = (\n GoEControllerSwitchEntityDescription(\n key=\"tse\",\n name=\"Time server enabled\",\n entity_category=EntityCategory.CONFIG,\n device_class=None,\n entity_registry_enabled_default=False,\n disabled=True,\n disabled_reason=\"Not exposed via MQTT in firmware 053.1\",\n ),\n GoEControllerSwitchEntityDescription(\n key=\"hsa\",\n name=\"HTTP STA authentication\",\n entity_category=EntityCategory.CONFIG,\n device_class=None,\n entity_registry_enabled_default=False,\n disabled=True,\n disabled_reason=\"Not exposed via MQTT in firmware 053.1\",\n ),\n GoEControllerSwitchEntityDescription(\n key=\"cwe\",\n name=\"Cloud websocket enabled\",\n entity_category=EntityCategory.CONFIG,\n device_class=None,\n entity_registry_enabled_default=False,\n disabled=True,\n disabled_reason=\"Not exposed via MQTT in firmware 053.1\",\n ),\n)"
},
{
"identifier": "GoEControllerSwitchEntityDescription",
"path": "custom_components/goecontroller_mqtt/definitions/switch.py",
"snippet": "class GoEControllerSwitchEntityDescription(GoEControllerEntityDescription, SwitchEntityDescription):\n \"\"\"Switch entity description for go-eController.\"\"\"\n\n domain: str = \"switch\"\n payload_on: str = \"true\"\n payload_off: str = \"false\"\n optimistic: bool = False"
},
{
"identifier": "GoEControllerEntity",
"path": "custom_components/goecontroller_mqtt/entity.py",
"snippet": "class GoEControllerEntity(Entity):\n \"\"\"Common go-eController entity.\"\"\"\n\n def __init__(\n self,\n config_entry: config_entries.ConfigEntry,\n description: GoEControllerEntityDescription,\n ) -> None:\n \"\"\"Initialize the sensor.\"\"\"\n topic_prefix = config_entry.data[CONF_TOPIC_PREFIX]\n serial_number = config_entry.data[CONF_SERIAL_NUMBER]\n\n self._topic = f\"{topic_prefix}/{serial_number}/{description.key}\"\n\n slug = slugify(self._topic.replace(\"/\", \"_\"))\n self.entity_id = f\"{description.domain}.{slug}\"\n\n parsed_attribute = description.attribute\n if isinstance(description.attribute, tuple):\n parsed_attribute = \"-\".join(description.attribute)\n\n self._attr_unique_id = \"-\".join(\n [serial_number, description.domain, description.key, parsed_attribute]\n )\n self._attr_device_info = DeviceInfo(\n identifiers={(DOMAIN, serial_number)},\n name=config_entry.title,\n manufacturer=DEVICE_INFO_MANUFACTURER,\n model=DEVICE_INFO_MODEL,\n )"
}
] | import logging
from homeassistant import config_entries, core
from homeassistant.components import mqtt
from homeassistant.components.switch import SwitchEntity
from homeassistant.core import callback
from .definitions.switch import SWITCHES, GoEControllerSwitchEntityDescription
from .entity import GoEControllerEntity | 776 | """The go-eController (MQTT) switch."""
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: core.HomeAssistant,
config_entry: config_entries.ConfigEntry,
async_add_entities,
):
"""Config entry setup."""
async_add_entities(
GoEControllerSwitch(config_entry, description)
for description in SWITCHES
if not description.disabled
)
class GoEControllerSwitch(GoEControllerEntity, SwitchEntity):
"""Representation of a go-eController switch that is updated via MQTT."""
| """The go-eController (MQTT) switch."""
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: core.HomeAssistant,
config_entry: config_entries.ConfigEntry,
async_add_entities,
):
"""Config entry setup."""
async_add_entities(
GoEControllerSwitch(config_entry, description)
for description in SWITCHES
if not description.disabled
)
class GoEControllerSwitch(GoEControllerEntity, SwitchEntity):
"""Representation of a go-eController switch that is updated via MQTT."""
| entity_description: GoEControllerSwitchEntityDescription | 1 | 2023-12-22 11:32:11+00:00 | 2k |
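Editor's note on the example above: the gold line attaches a `GoEControllerSwitchEntityDescription`, whose `payload_on`/`payload_off` fields drive how raw MQTT payloads map onto an on/off state. A framework-free sketch of that mapping, using hypothetical stand-in types rather than the Home Assistant API:

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class SwitchDescription:  # illustrative stand-in for the entity description
    key: str
    payload_on: str = "true"
    payload_off: str = "false"

def parse_state(description: SwitchDescription, payload: str) -> Optional[bool]:
    """Map a raw MQTT payload onto the switch's on/off state."""
    if payload == description.payload_on:
        return True
    if payload == description.payload_off:
        return False
    return None  # unknown payload: leave the state unchanged

assert parse_state(SwitchDescription(key="tse"), "true") is True
assert parse_state(SwitchDescription(key="tse"), "garbage") is None
```

Returning `None` for an unrecognized payload lets the entity keep its last known state, a common convention for MQTT-backed entities.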
T0kyoB0y/PotatoWidgets | PotatoWidgets/Widget/_Common/_BasicProps.py | [
{
"identifier": "Listener",
"path": "PotatoWidgets/Variable/_Listener.py",
"snippet": "class Listener(Variable):\n def __init__(self, callback, initial_value=None):\n super().__init__(initial_value)\n self._callback = callback\n self._thread = None\n self._stop_thread = threading.Event()\n self.start_listening()\n\n def stop_listening(self):\n if self._thread and self._thread.is_alive():\n self._stop_thread.set()\n self._thread.join()\n\n def start_listening(self):\n if self._thread and self._thread.is_alive():\n print(f\"{self} is already listening\")\n return\n\n self._stop_thread.clear()\n self._thread = threading.Thread(target=lambda: self._callback(self))\n self._thread.start()\n\n def get_value(self):\n return self._value\n\n def set_value(self, new_value):\n self._value = new_value\n self.emit(\"valuechanged\")\n\n def __str__(self):\n return str(self._value)"
},
{
"identifier": "Poll",
"path": "PotatoWidgets/Variable/_Poll.py",
"snippet": "class Poll(Variable):\n def __init__(self, interval, callback, initial_value=None):\n super().__init__(initial_value or callback())\n self._interval = self._parse_interval(interval)\n self._callback = callback\n self._timeout_id = None\n self.start_poll()\n\n def _parse_interval(self, interval):\n try:\n if isinstance(interval, str):\n unit = interval[-1].lower()\n value = int(interval[:-1])\n\n if unit == \"s\":\n return value * 1000\n elif unit == \"m\":\n return value * 60 * 1000\n elif unit == \"h\":\n return value * 60 * 60 * 1000\n elif isinstance(interval, int):\n return interval\n except (ValueError, IndexError):\n return int(interval)\n\n def is_polling(self):\n return bool(self._timeout_id)\n\n def stop_poll(self):\n if self._timeout_id:\n GLib.source_remove(self._timeout_id)\n self._timeout_id = None\n else:\n print(f\"{self} has no poll running\")\n\n def start_poll(self):\n if self.is_polling():\n print(f\"{self} is already polling\")\n return\n\n self._timeout_id = GLib.timeout_add(\n priority=GLib.PRIORITY_DEFAULT_IDLE,\n interval=self._interval,\n function=self._poll_callback,\n )\n\n def _poll_callback(self):\n self.set_value(self._callback())\n return GLib.SOURCE_CONTINUE\n\n def get_value(self):\n return self._value\n\n def set_value(self, new_value):\n self._value = new_value\n self.emit(\"valuechanged\")\n\n def __str__(self):\n return str(self._value)"
},
{
"identifier": "Variable",
"path": "PotatoWidgets/Variable/_Variable.py",
"snippet": "class Variable(GObject.Object):\n valuechanged = GObject.Signal()\n\n def __init__(self, initial_value):\n super().__init__()\n self._value = initial_value\n\n def get_value(self):\n return self._value\n\n def set_value(self, new_value):\n self._value = new_value\n self.emit(\"valuechanged\")\n\n def initial_value(self, value):\n self._value = value\n\n def __str__(self):\n return str(self._value)"
}
] | from ...__Import import *
from ...Variable import Listener, Poll, Variable | 1,380 |
class BasicProps(Gtk.Widget):
def __init__(
self,
halign,
valign,
hexpand,
vexpand,
active,
visible,
classname,
# tooltip,
css,
size=[10, 10],
):
Gtk.Widget.__init__(self)
self.set_hexpand(True if hexpand else False)
self.set_vexpand(True if vexpand else False)
self.set_halign(halign)
self.set_valign(valign)
self.set_visible(visible)
self.set_sensitive(active) if active is not None else None
self.set_classname(classname)
self.__clasif_size(size)
self.apply_css(css) if css else None
for key, value in locals().items():
callback = {
"halign": self.set_halign,
"valign": self.set_valign,
"hexpand": self.set_hexpand,
"vexpand": self.set_vexpand,
"active": self.set_sensitive,
"visible": self.set_visible,
"size": self.set_size,
"classname": self.set_classname,
}.get(key)
self.bind(value, callback) if callback else None
def set_size(self, size):
self.__clasif_size(size)
def set_halign(self, param):
super().set_halign(self.__clasif_align(str(param)))
def set_valign(self, param):
super().set_valign(self.__clasif_align(str(param)))
def __clasif_size(self, size):
if isinstance(size, int):
self.set_size_request(size, size)
elif isinstance(size, list):
if len(size) == 2:
self.set_size_request(size[0], size[1])
elif len(size) == 1:
self.set_size_request(size[0], size[0])
def __clasif_align(self, param):
dict = {
"fill": Gtk.Align.FILL,
"start": Gtk.Align.START,
"end": Gtk.Align.END,
"center": Gtk.Align.CENTER,
"baseline": Gtk.Align.BASELINE,
}
return dict.get(param.lower(), Gtk.Align.FILL)
def set_classname(self, param):
if isinstance(param, (str)):
context = self.get_style_context()
[context.add_class(i) for i in param.split(" ") if i != " "]
elif isinstance(param, (list)):
for i in param:
|
class BasicProps(Gtk.Widget):
def __init__(
self,
halign,
valign,
hexpand,
vexpand,
active,
visible,
classname,
# tooltip,
css,
size=[10, 10],
):
Gtk.Widget.__init__(self)
self.set_hexpand(True if hexpand else False)
self.set_vexpand(True if vexpand else False)
self.set_halign(halign)
self.set_valign(valign)
self.set_visible(visible)
self.set_sensitive(active) if active is not None else None
self.set_classname(classname)
self.__clasif_size(size)
self.apply_css(css) if css else None
for key, value in locals().items():
callback = {
"halign": self.set_halign,
"valign": self.set_valign,
"hexpand": self.set_hexpand,
"vexpand": self.set_vexpand,
"active": self.set_sensitive,
"visible": self.set_visible,
"size": self.set_size,
"classname": self.set_classname,
}.get(key)
self.bind(value, callback) if callback else None
def set_size(self, size):
self.__clasif_size(size)
def set_halign(self, param):
super().set_halign(self.__clasif_align(str(param)))
def set_valign(self, param):
super().set_valign(self.__clasif_align(str(param)))
def __clasif_size(self, size):
if isinstance(size, int):
self.set_size_request(size, size)
elif isinstance(size, list):
if len(size) == 2:
self.set_size_request(size[0], size[1])
elif len(size) == 1:
self.set_size_request(size[0], size[0])
def __clasif_align(self, param):
dict = {
"fill": Gtk.Align.FILL,
"start": Gtk.Align.START,
"end": Gtk.Align.END,
"center": Gtk.Align.CENTER,
"baseline": Gtk.Align.BASELINE,
}
return dict.get(param.lower(), Gtk.Align.FILL)
def set_classname(self, param):
if isinstance(param, (str)):
context = self.get_style_context()
[context.add_class(i) for i in param.split(" ") if i != " "]
elif isinstance(param, (list)):
for i in param: | if isinstance(i, (Listener, Variable, Poll)): | 1 | 2023-12-30 01:34:01+00:00 | 2k |
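The constructor loop above hands each reactive argument to `self.bind`, which falls outside this crop. A plain-Python sketch of one plausible contract for that binding: apply plain values once, and re-run the setter whenever a `Variable`-like value changes (the stand-ins below replace the GObject signal machinery):

```python
class FakeVariable:
    def __init__(self, value):
        self._value = value
        self._subscribers = []

    def connect(self, callback):
        self._subscribers.append(callback)

    def set_value(self, value):
        self._value = value
        for callback in self._subscribers:
            callback(value)  # notify every bound setter

def bind(value, setter):
    """Apply plain values once; subscribe setters to reactive values."""
    if isinstance(value, FakeVariable):
        value.connect(setter)   # react to future changes
        setter(value._value)    # and apply the current value immediately
    else:
        setter(value)

seen = []
v = FakeVariable("start")
bind(v, seen.append)
v.set_value("center")
assert seen == ["start", "center"]
```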
Zerohertz/Streamlit-Quant | lib/visual.py | [
{
"identifier": "_main",
"path": "lib/layout.py",
"snippet": "def _main():\n layout = _default()\n layout.height = 500 * st.session_state[\"scale\"]\n layout.width = 1000\n layout.xaxis = {\n \"type\": \"category\",\n \"gridcolor\": \"black\",\n \"tickangle\": -45,\n \"tickfont\": {\"color\": \"black\"},\n \"showgrid\": True,\n \"tickmode\": \"auto\",\n \"nticks\": 20,\n \"rangeslider\": {\"visible\": False},\n }\n layout.yaxis = {\n \"gridcolor\": \"black\",\n \"tickprefix\": \"₩\",\n \"tickformat\": \",\",\n \"tickfont\": {\"color\": \"black\"},\n \"showgrid\": True,\n \"autorange\": True,\n }\n if not st.session_state[\"cache\"][\"vis_signals\"]:\n return layout\n layout.yaxis2 = {\n \"overlaying\": \"y\",\n \"side\": \"right\",\n \"tickfont\": {\"color\": \"white\"},\n \"showgrid\": False,\n }\n layout.shapes = st.session_state[\"cache\"][\"transaction_vert\"]\n if st.session_state[\"cache\"][\"method\"] != \"Quant\":\n layout.yaxis3 = {\n \"overlaying\": \"y\",\n \"side\": \"right\",\n \"tickfont\": {\"color\": \"white\"},\n \"showgrid\": False,\n }\n return layout"
},
{
"identifier": "_transaction",
"path": "lib/layout.py",
"snippet": "def _transaction():\n layout = _default()\n layout.height = 400 * st.session_state[\"scale\"]\n layout.width = 1000\n return layout"
},
{
"identifier": "_color",
"path": "lib/util.py",
"snippet": "def _color(cnt, alpha=0.99, palette=\"husl\"):\n colors = []\n colors_ = zz.plot.color(cnt, uint8=True, palette=palette)\n if cnt == 1:\n colors_ = [colors_]\n for color_ in colors_:\n colors.append(\"rgba(\" + \",\".join(list(map(str, color_))) + f\",{alpha})\")\n return colors"
}
] | import plotly.graph_objs as go
import streamlit as st
import zerohertzLib as zz
from plotly.subplots import make_subplots
from lib.layout import _main, _transaction
from lib.util import _color | 714 |
def candle():
data, xdata = st.session_state["cache"]["data"], st.session_state["cache"]["xdata"]
st.session_state["cache"]["candle"] = go.Candlestick(
x=xdata,
open=data.Open,
high=data.High,
low=data.Low,
close=data.Close,
increasing={"line": {"color": "red"}},
decreasing={"line": {"color": "blue"}},
name=st.session_state["cache"]["name"],
)
st.session_state["logger"].info(
f"""[Plot] Candle Chart: {st.session_state["cache"]["name"]} ({st.session_state["cache"]["symbol"]})"""
)
def moving_average():
xdata = st.session_state["cache"]["xdata"]
st.session_state["cache"]["ma"] = []
|
def candle():
data, xdata = st.session_state["cache"]["data"], st.session_state["cache"]["xdata"]
st.session_state["cache"]["candle"] = go.Candlestick(
x=xdata,
open=data.Open,
high=data.High,
low=data.Low,
close=data.Close,
increasing={"line": {"color": "red"}},
decreasing={"line": {"color": "blue"}},
name=st.session_state["cache"]["name"],
)
st.session_state["logger"].info(
f"""[Plot] Candle Chart: {st.session_state["cache"]["name"]} ({st.session_state["cache"]["symbol"]})"""
)
def moving_average():
xdata = st.session_state["cache"]["xdata"]
st.session_state["cache"]["ma"] = [] | colors = _color(4, 0.5, "Set1") | 2 | 2023-12-26 11:29:06+00:00 | 2k |
acman/py_june | comments/views.py | [
{
"identifier": "Post",
"path": "posts/models.py",
"snippet": "class Post(SlugModel):\n title = models.CharField(max_length=50)\n content = models.TextField(max_length=500, blank=True)\n author = models.ForeignKey(\"users.ForumUser\", on_delete=models.CASCADE)\n category = models.ForeignKey(\"categories.Category\", on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n is_active = models.BooleanField(default=True)\n\n class Meta:\n db_table = \"posts\"\n verbose_name = \"Post\"\n verbose_name_plural = \"Posts\"\n ordering = [\"-created_at\"]\n\n def __str__(self) -> str:\n return self.title"
},
{
"identifier": "CommentForm",
"path": "comments/forms.py",
"snippet": "class CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = [\"title\", \"content\"]\n\n def __init__(self, *args: tuple, **kwargs: dict) -> None:\n super(CommentForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.helper.form_method = \"post\"\n self.helper.layout = Layout(\n \"title\",\n \"content\",\n Submit(\n \"submit\", \"Create Comment\", css_class=\"btn waves-effect waves-light\"\n ),\n )\n self.field_order = [\"title\", \"content\"]"
},
{
"identifier": "Comment",
"path": "comments/models.py",
"snippet": "class Comment(models.Model):\n title = models.CharField(max_length=50)\n content = models.TextField(max_length=500, blank=True)\n author = models.ForeignKey(\"users.ForumUser\", on_delete=models.CASCADE)\n post = models.ForeignKey(\"posts.Post\", on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n is_active = models.BooleanField(default=True)\n\n class Meta:\n db_table = \"comments\"\n verbose_name = \"Comment\"\n verbose_name_plural = \"Comments\"\n ordering = [\"-created_at\"]\n\n def __str__(self) -> str:\n return self.title"
}
] | from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.http import HttpRequest, HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views import View
from posts.models import Post
from .forms import CommentForm
from .models import Comment | 779 |
class CreateCommentView(LoginRequiredMixin, View):
template_name = "comments/comment_form.html"
login_url = "/users/login/"
def get(self, request: HttpRequest, post_slug: str) -> HttpResponse:
post = get_object_or_404(Post, slug=post_slug)
form = CommentForm()
return render(request, self.template_name, {"form": form, "post": post})
def post(self, request: HttpRequest, post_slug: str) -> HttpResponse:
form = CommentForm(request.POST)
post = get_object_or_404(Post, slug=post_slug)
if form.is_valid():
comment = form.save(commit=False)
comment.author = self.request.user
comment.post_id = post.pk
comment.save()
return redirect("categories:detail", category_slug=post.category.slug)
return render(request, self.template_name, {"form": form, "post": post})
class UpdateCommentView(UserPassesTestMixin, View):
template_name = "comments/comment_update.html"
def test_func(self) -> bool:
comment_pk = self.kwargs.get("comment_pk")
|
class CreateCommentView(LoginRequiredMixin, View):
template_name = "comments/comment_form.html"
login_url = "/users/login/"
def get(self, request: HttpRequest, post_slug: str) -> HttpResponse:
post = get_object_or_404(Post, slug=post_slug)
form = CommentForm()
return render(request, self.template_name, {"form": form, "post": post})
def post(self, request: HttpRequest, post_slug: str) -> HttpResponse:
form = CommentForm(request.POST)
post = get_object_or_404(Post, slug=post_slug)
if form.is_valid():
comment = form.save(commit=False)
comment.author = self.request.user
comment.post_id = post.pk
comment.save()
return redirect("categories:detail", category_slug=post.category.slug)
return render(request, self.template_name, {"form": form, "post": post})
class UpdateCommentView(UserPassesTestMixin, View):
template_name = "comments/comment_update.html"
def test_func(self) -> bool:
comment_pk = self.kwargs.get("comment_pk") | comment = get_object_or_404(Comment, pk=comment_pk) | 2 | 2023-12-23 09:36:46+00:00 | 2k |
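The gold line fetches the comment inside `test_func`; with `UserPassesTestMixin`, the natural completion compares the comment's author to the requesting user. A dependency-free sketch of that ownership rule (the comparison itself is a plausible guess, not shown in the source):

```python
from dataclasses import dataclass

@dataclass
class FakeComment:  # stand-in for the Django model
    pk: int
    author: str

def user_may_edit(comment: FakeComment, request_user: str) -> bool:
    """Mirror of a typical test_func body: only the author passes."""
    return comment.author == request_user

assert user_may_edit(FakeComment(pk=1, author="alice"), "alice")
assert not user_may_edit(FakeComment(pk=1, author="alice"), "bob")
```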
pkariz/grin-explorer | backend/api/signals/receivers.py | [
{
"identifier": "Block",
"path": "backend/api/models.py",
"snippet": "class Block(TimeStampedModel):\n blockchain = models.ForeignKey(\n Blockchain, related_name='blocks', on_delete=models.CASCADE)\n hash = models.CharField(\n primary_key=True,\n max_length=64,\n validators=[MinLengthValidator(64)],\n db_index=True,\n )\n height = models.PositiveIntegerField(db_index=True)\n timestamp = models.DateTimeField(db_index=True)\n header = models.ForeignKey(\n 'BlockHeader', related_name='block', on_delete=models.CASCADE)\n prev_hash = models.CharField(\n max_length=64,\n null=True,\n blank=True,\n validators=[MinLengthValidator(64)],\n )\n nr_inputs = models.PositiveIntegerField(default=0)\n nr_outputs = models.PositiveIntegerField(default=0)\n nr_kernels = models.PositiveIntegerField(default=0)\n # when reorg is set it means this block is part of a reorg and not the main\n # chain\n reorg = models.ForeignKey(\n 'Reorg', null=True, related_name='blocks', on_delete=models.CASCADE)\n\n def __str__(self):\n suffix = ''\n if self.reorg:\n suffix = ' Reorged: {}'.format(self.reorg.id)\n return '{}: {} (prev: {})'.format(\n self.height, self.hash, self.prev_hash)\n\n def get_next_block(self):\n return Block.objects.filter(prev_hash=self.hash).first()\n\n def get_previous_block(self):\n return Block.objects.filter(hash=self.prev_hash).first()\n\n def full_print(self, prefix=''):\n \"\"\"Used for developing and debugging.\"\"\"\n print('---------------------------------------------------------------')\n print(f'{prefix}Block {self.height}: {self.hash}, reorg: {self.reorg}')\n print(f'{prefix} INPUTS:')\n for input in self.inputs.all():\n print(f'{prefix} {input}, output: {input.output}')\n print(f'{prefix} OUTPUTS:')\n for output in self.outputs.all():\n print(f'{prefix} {output}')\n print(f'{prefix} KERNELS:')\n for kernel in self.kernels.all():\n print(f'{prefix} {kernel}')\n print('---------------------------------------------------------------')"
},
{
"identifier": "Reorg",
"path": "backend/api/models.py",
"snippet": "class Reorg(TimeStampedModel):\n id = models.BigAutoField(primary_key=True)\n blockchain = models.ForeignKey(\n Blockchain, related_name='reorgs', on_delete=models.CASCADE)\n # start_reorg_block and end_reorg_block define starting and ending block,\n # which were reorged\n start_reorg_block = models.ForeignKey(\n Block, related_name='start_reorgs', on_delete=models.CASCADE)\n end_reorg_block = models.ForeignKey(\n Block, related_name='end_reorgs', on_delete=models.CASCADE)\n # start_main_block defines starting block which is the new start of the main\n # chain - the block that replaced start_reorg_block. We usually don't know\n # which the ending block is when we spot the reorg, so we don't store it\n # (we don't even have it in DB at that time yet since we usually get them\n # incrementally in the order they're accepted).\n start_main_block = models.ForeignKey(\n Block, related_name='start_mains', on_delete=models.CASCADE)\n\n def __str__(self):\n return '{}: start: {}, end: {}'.format(\n self.blockchain.slug, self.start_reorg_block, self.end_reorg_block)"
},
{
"identifier": "fix_outputs_and_inputs_from_reorg",
"path": "backend/api/helpers.py",
"snippet": "def fix_outputs_and_inputs_from_reorg(reorg):\n \"\"\"\n Fix Output.spent and Input.output on instances that were affected by the\n given reorg. Note that due to the order of block fetching (sometimes\n descending by height) we might have corrupted Output/Input instances also on\n the reorged block. For example if block 102.1 in a reorg creates output with\n commitment 'd' and the same commitment is created in block 102 but we first\n fetch block 103 which spends it, then it will update output 'd' from 102.1\n because it doesn't yet know that it's a part of a reorg (due to the way we\n implemented things). We also need to fix outputs which were spent in a reorg\n but not in the main chain and vice-versa.\n \"\"\"\n # solve reorged part\n reorged_blocks = get_blocks_between(\n reorg.start_reorg_block, reorg.end_reorg_block)\n reorg_inputs = Input.objects.filter(block__in=reorged_blocks)\n reorg_outputs = Output.objects.filter(block__in=reorged_blocks)\n for output in reorg_outputs:\n matching_input = reorg_inputs\\\n .filter(commitment=output.commitment)\\\n .first()\n output.spent = False\n if matching_input:\n output.spent = True\n matching_input.output = output\n matching_input.save()\n output.save()\n\n # NOTE: some redundancy in this loop, but reorgs are rare so it's ok\n for input in reorg_inputs:\n matching_output = reorg_outputs\\\n .filter(commitment=input.commitment)\\\n .first()\n if not matching_output:\n # part of the main chain before the reorg happened, fix it there\n matching_output = Output.objects.filter(\n block__reorg=None, commitment=input.commitment).first()\n if matching_output:\n matching_output.spent = False\n matching_output.save()\n input.output = matching_output\n input.save()\n # solve main part\n main_blocks = Block.objects\\\n .filter(height__gte=reorg.start_main_block.height, reorg=None)\\\n .order_by('height')\n for block in main_blocks:\n for input in block.inputs.all():\n matching_output = Output.objects.filter(\n block__reorg=None, commitment=input.commitment).first()\n if matching_output:\n matching_output.spent = True\n matching_output.save()\n input.output = matching_output\n input.save()"
}
] | from django.db.models.signals import post_save
from django.dispatch import receiver
from backend.api.models import Block, Reorg
from backend.api.helpers import fix_outputs_and_inputs_from_reorg
import logging | 1,477 |
logger = logging.getLogger(__name__)
@receiver(
post_save,
|
logger = logging.getLogger(__name__)
@receiver(
post_save, | sender=Block, | 0 | 2023-12-24 22:15:11+00:00 | 2k |
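`fix_outputs_and_inputs_from_reorg` above calls a `get_blocks_between` helper to walk the reorged span. A standalone sketch of that traversal over an in-memory `prev_hash` index (a hypothetical model, not the Django ORM):

```python
from dataclasses import dataclass
from typing import Dict, List, Optional

@dataclass
class FakeBlock:
    hash: str
    prev_hash: Optional[str]

def blocks_between(child_of: Dict[str, FakeBlock],
                   start: FakeBlock, end: FakeBlock) -> List[FakeBlock]:
    """Collect start..end by repeatedly following each block's child link."""
    chain = [start]
    while chain[-1].hash != end.hash:
        chain.append(child_of[chain[-1].hash])
    return chain

a = FakeBlock("a", None)
b = FakeBlock("b", "a")
c = FakeBlock("c", "b")
child_of = {"a": b, "b": c}  # maps a block's hash to its child
assert [blk.hash for blk in blocks_between(child_of, a, c)] == ["a", "b", "c"]
```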
CodeWithEmad/num2fa | num2fa/converters/word_converter.py | [
{
"identifier": "DEFAULT_SCIENTIFIC_SEPARATOR",
"path": "num2fa/constants.py",
"snippet": "DEFAULT_SCIENTIFIC_SEPARATOR = \" در ده به توان \""
},
{
"identifier": "WORDS_DECIMAL_SEPARATOR",
"path": "num2fa/constants.py",
"snippet": "WORDS_DECIMAL_SEPARATOR = \" و \""
},
{
"identifier": "WORDS_FRACTION_SEPARATOR",
"path": "num2fa/constants.py",
"snippet": "WORDS_FRACTION_SEPARATOR = \" \""
},
{
"identifier": "WORDS_NEGATIVE",
"path": "num2fa/constants.py",
"snippet": "WORDS_NEGATIVE = \"منفی \""
},
{
"identifier": "ZERO",
"path": "num2fa/constants.py",
"snippet": "ZERO = \"صفر\""
},
{
"identifier": "_natural_words",
"path": "num2fa/utils.py",
"snippet": "def _natural_words(str_num: str) -> str:\n if str_num == \"0\":\n return ZERO\n length = len(str_num)\n if length > len(CLASSES) * 3:\n raise ValueError(\"out of range\")\n\n modulo_3 = length % 3\n if modulo_3:\n str_num = \"0\" * (3 - modulo_3) + str_num\n length += 3 - modulo_3\n\n groups = length // 3\n group = groups\n natural_words = \"\"\n while group > 0:\n three_digit = str_num[group * 3 - 3 : group * 3]\n word3 = _three_digit_words(int(three_digit))\n if word3 and group != groups:\n if natural_words:\n natural_words = (\n word3\n + CLASSES[groups - group]\n + WORDS_DECIMAL_SEPARATOR\n + natural_words\n )\n else:\n natural_words = word3 + CLASSES[groups - group]\n else:\n natural_words = word3 + natural_words\n group -= 1\n\n return natural_words"
},
{
"identifier": "_normalize_str",
"path": "num2fa/utils.py",
"snippet": "def _normalize_str(number: str) -> str:\n \"\"\"Normalize the input number string.\"\"\"\n return str(number).strip().translate(NORMALIZATION_TABLE)"
},
{
"identifier": "_point_words",
"path": "num2fa/utils.py",
"snippet": "def _point_words(\n number: str,\n decimal_separator: str,\n) -> str:\n before_p, p, after_p = number.partition(\".\")\n if after_p:\n if before_p == \"0\":\n if after_p == \"0\":\n return ZERO\n return _natural_words(after_p) + DECIMAL_PLACES[len(after_p)]\n if after_p != \"0\":\n return (\n _natural_words(before_p)\n + decimal_separator\n + _natural_words(after_p)\n + DECIMAL_PLACES[len(after_p)]\n )\n return _natural_words(before_p)\n return _natural_words(before_p)"
}
] | from decimal import Decimal
from fractions import Fraction
from functools import singledispatch
from typing import Union
from num2fa.constants import (
DEFAULT_SCIENTIFIC_SEPARATOR,
WORDS_DECIMAL_SEPARATOR,
WORDS_FRACTION_SEPARATOR,
WORDS_NEGATIVE,
ZERO,
)
from num2fa.utils import _natural_words, _normalize_str, _point_words | 1,135 | """Provide functions to convert a number to Persian words."""
def _exp_words(
number: str,
positive: str,
negative: str,
decimal_separator: str,
scientific_separator: str,
) -> str:
# exponent
base, e, exponent = number.partition("e")
if exponent:
return (
_point_words(base, decimal_separator)
+ scientific_separator
+ words(int(exponent), positive, negative)
)
return _point_words(base, decimal_separator)
@singledispatch
def words(
number: Union[int, float, str, Decimal, Fraction],
positive: str = "",
negative: str = WORDS_NEGATIVE,
decimal_separator: str = WORDS_DECIMAL_SEPARATOR,
fraction_separator: str = WORDS_FRACTION_SEPARATOR,
ordinal_denominator: bool = True,
scientific_separator: str = DEFAULT_SCIENTIFIC_SEPARATOR,
) -> str:
"""Return the word form of number.
If input is a string it should be in the form of a valid Python
representation for one of the other accepted types. The only exceptions are
that digits can be in Persian, for example words('۴۲') is valid.
"""
raise TypeError("invalid input type for words function", number)
@words.register(str)
@words.register(Decimal)
def _(
number: str,
positive: str = "",
negative: str = WORDS_NEGATIVE,
decimal_separator: str = WORDS_DECIMAL_SEPARATOR,
fraction_separator: str = WORDS_FRACTION_SEPARATOR,
ordinal_denominator: bool = True,
scientific_separator: str = DEFAULT_SCIENTIFIC_SEPARATOR,
) -> str:
# Normalize the number string
number = _normalize_str(number)
# sign
c0 = number[0]
if c0 == "-":
sign = negative
number = number[1:]
elif c0 == "0":
sign = ""
else:
sign = positive
numerator, e, denominator = number.partition("/")
if denominator:
if ordinal_denominator:
return (
sign
| """Provide functions to convert a number to Persian words."""
def _exp_words(
number: str,
positive: str,
negative: str,
decimal_separator: str,
scientific_separator: str,
) -> str:
# exponent
base, e, exponent = number.partition("e")
if exponent:
return (
_point_words(base, decimal_separator)
+ scientific_separator
+ words(int(exponent), positive, negative)
)
return _point_words(base, decimal_separator)
@singledispatch
def words(
number: Union[int, float, str, Decimal, Fraction],
positive: str = "",
negative: str = WORDS_NEGATIVE,
decimal_separator: str = WORDS_DECIMAL_SEPARATOR,
fraction_separator: str = WORDS_FRACTION_SEPARATOR,
ordinal_denominator: bool = True,
scientific_separator: str = DEFAULT_SCIENTIFIC_SEPARATOR,
) -> str:
"""Return the word form of number.
If input is a string it should be in the form of a valid Python
representation for one of the other accepted types. The only exceptions are
that digits can be in Persian, for example words('۴۲') is valid.
"""
raise TypeError("invalid input type for words function", number)
@words.register(str)
@words.register(Decimal)
def _(
number: str,
positive: str = "",
negative: str = WORDS_NEGATIVE,
decimal_separator: str = WORDS_DECIMAL_SEPARATOR,
fraction_separator: str = WORDS_FRACTION_SEPARATOR,
ordinal_denominator: bool = True,
scientific_separator: str = DEFAULT_SCIENTIFIC_SEPARATOR,
) -> str:
# Normalize the number string
number = _normalize_str(number)
# sign
c0 = number[0]
if c0 == "-":
sign = negative
number = number[1:]
elif c0 == "0":
sign = ""
else:
sign = positive
numerator, e, denominator = number.partition("/")
if denominator:
if ordinal_denominator:
return (
sign | + _natural_words(numerator) | 5 | 2023-12-30 14:28:57+00:00 | 2k |
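`_exp_words` above leans on `str.partition` to split scientific notation before converting each half; the split step in isolation:

```python
def split_scientific(number: str):
    """'1.5e3' -> ('1.5', '3'); plain numbers keep a None exponent."""
    base, _, exponent = number.partition("e")
    return base, (exponent or None)

assert split_scientific("1.5e3") == ("1.5", "3")
assert split_scientific("42") == ("42", None)
```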
the-seeds/cardinal | src/cardinal/core/extractor/base_extractor.py | [
{
"identifier": "Extractor",
"path": "src/cardinal/core/schema/extractor.py",
"snippet": "class Extractor(ABC):\n @abstractmethod\n def load(self, input_files: List[Path], user_id: str, verbose: Optional[bool] = False) -> None:\n r\"\"\"\n Loads the files into database.\n\n Args:\n input_files: a list of paths to input files.\n user_id: the user id.\n verbose: whether or not to show the process bar.\n \"\"\"\n ..."
},
{
"identifier": "Leaf",
"path": "src/cardinal/core/schema/leaf.py",
"snippet": "class Leaf(LeafIndex):\n content: str"
},
{
"identifier": "LeafIndex",
"path": "src/cardinal/core/schema/leaf.py",
"snippet": "class LeafIndex(BaseModel):\n leaf_id: str = Field(default_factory=lambda: uuid.uuid4().hex)\n user_id: str"
},
{
"identifier": "CJKTextSplitter",
"path": "src/cardinal/core/splitter/text_splitter.py",
"snippet": "class CJKTextSplitter(TextSplitter):\n def split(self, text: str) -> List[str]:\n text = re.sub(r\"\\n{3,}\", r\"\\n\", text)\n text = re.sub(r\" {3,}\", r\" \", text)\n text = re.sub(r\"([。!?;])([^’”])\", r\"\\1\\n\\2\", text) # split with CJK stops\n text = re.sub(r\"(\\…{2})([^’”])\", r\"\\1\\n\\2\", text) # split with CJK ellipsis\n text = re.sub(r\"([。!?;][’”]{0,2})([^,。!?;])\", r\"\\1\\n\\2\", text)\n text = text.rstrip()\n return super().split(text)"
}
] | import os
from multiprocessing import Pool
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional
from tqdm import tqdm
from ..schema import Extractor, Leaf, LeafIndex
from ..splitter import CJKTextSplitter
from ..model import EmbedOpenAI
from ..schema import StringKeyedStorage, VectorStore
from ..model import EmbedOpenAI
from ..storage import RedisStorage
from ..vectorstore import Milvus | 780 |
if TYPE_CHECKING:
class BaseExtractor(Extractor):
def __init__(
self, vectorizer: "EmbedOpenAI", storage: "StringKeyedStorage[Leaf]", vectorstore: "VectorStore[LeafIndex]"
) -> None:
self._vectorizer = vectorizer
self._storage = storage
self._vectorstore = vectorstore
self._splitter = CJKTextSplitter()
def load(self, input_files: List[Path], user_id: str, verbose: Optional[bool] = False) -> None:
file_contents: List[str] = []
for file_path in tqdm(input_files, desc="Extract content", disable=(not verbose)):
if file_path.suffix == ".txt":
with open(file_path, "r", encoding="utf-8") as f:
file_contents.append(f.read())
else:
raise NotImplementedError
text_chunks = []
with Pool(processes=int(os.environ.get("NUM_CPU_CORE"))) as pool:
for chunks in tqdm(
pool.imap_unordered(self._splitter.split, file_contents),
total=len(file_contents),
desc="Split content",
disable=(not verbose),
):
text_chunks.extend(chunks)
leaf_indexes = []
for chunk in tqdm(text_chunks, desc="Build index", disable=(not verbose)):
|
if TYPE_CHECKING:
class BaseExtractor(Extractor):
def __init__(
self, vectorizer: "EmbedOpenAI", storage: "StringKeyedStorage[Leaf]", vectorstore: "VectorStore[LeafIndex]"
) -> None:
self._vectorizer = vectorizer
self._storage = storage
self._vectorstore = vectorstore
self._splitter = CJKTextSplitter()
def load(self, input_files: List[Path], user_id: str, verbose: Optional[bool] = False) -> None:
file_contents: List[str] = []
for file_path in tqdm(input_files, desc="Extract content", disable=(not verbose)):
if file_path.suffix == ".txt":
with open(file_path, "r", encoding="utf-8") as f:
file_contents.append(f.read())
else:
raise NotImplementedError
text_chunks = []
with Pool(processes=int(os.environ.get("NUM_CPU_CORE"))) as pool:
for chunks in tqdm(
pool.imap_unordered(self._splitter.split, file_contents),
total=len(file_contents),
desc="Split content",
disable=(not verbose),
):
text_chunks.extend(chunks)
leaf_indexes = []
for chunk in tqdm(text_chunks, desc="Build index", disable=(not verbose)): | leaf_index = LeafIndex(user_id=user_id) | 2 | 2023-12-26 14:16:40+00:00 | 2k |
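The `load()` flow above is extract, split, then index. A toy version of the split-and-store step, with a plain dict standing in for the `Leaf` storage (interfaces simplified; the real code also embeds each chunk via the vectorizer):

```python
from typing import Dict, List

def split(text: str, size: int = 10) -> List[str]:
    """Naive fixed-width splitter standing in for CJKTextSplitter."""
    return [text[i:i + size] for i in range(0, len(text), size)]

storage: Dict[str, str] = {}
for i, chunk in enumerate(split("some long document text to index")):
    storage[f"leaf-{i}"] = chunk  # id -> content, like Leaf/LeafIndex

assert len(storage) == 4 and storage["leaf-0"] == "some long "
```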
datrocity/pond | tests/test_conventions.py | [
{
"identifier": "METADATA_DIRNAME",
"path": "pond/conventions.py",
"snippet": "METADATA_DIRNAME = '_pond'"
},
{
"identifier": "MANIFEST_FILENAME",
"path": "pond/conventions.py",
"snippet": "MANIFEST_FILENAME = 'manifest.yml'"
},
{
"identifier": "version_data_location",
"path": "pond/conventions.py",
"snippet": "def version_data_location(version_location: str, data_filename: str) -> str:\n return urijoinpath(version_location, data_filename)"
},
{
"identifier": "version_manifest_location",
"path": "pond/conventions.py",
"snippet": "def version_manifest_location(version_location: str) -> str:\n \"\"\" Manifest location with respect to a version root. \"\"\"\n return urijoinpath(version_location, METADATA_DIRNAME, MANIFEST_FILENAME)"
},
{
"identifier": "version_uri",
"path": "pond/conventions.py",
"snippet": "def version_uri(datastore_id: str, location: str, artifact_name: str, version_name: VersionName):\n uri = f'pond://{datastore_id}/{location}/{artifact_name}/{str(version_name)}'\n return uri"
},
{
"identifier": "urijoinpath",
"path": "pond/conventions.py",
"snippet": "def urijoinpath(*parts: str) -> str:\n \"\"\"Joins two uri path components, also ensure the right part does not end with a slash\"\"\"\n # TODO: use os.path.join\n return '/'.join([part.rstrip('/') for part in parts])"
},
{
"identifier": "SimpleVersionName",
"path": "pond/version_name.py",
"snippet": "class SimpleVersionName(VersionName):\n \"\"\"Simple version name are just an integer number (greater than 0) prefixed with \"v\" when\n rendered as string.\"\"\"\n\n _FORMAT = re.compile('^v?([1-9][0-9]*)$')\n\n # --- VersionName class interface\n\n @classmethod\n def from_string(cls, version_name: str) -> 'SimpleVersionName':\n match = SimpleVersionName._FORMAT.match(version_name)\n if not match:\n raise InvalidVersionName(version_name)\n return cls(int(match[1]))\n\n @classmethod\n def next(cls, prev: Optional['VersionName'] = None) -> VersionName:\n if prev is None:\n next_ = SimpleVersionName(1)\n elif not isinstance(prev, SimpleVersionName):\n raise IncompatibleVersionName(prev, SimpleVersionName)\n else:\n next_ = SimpleVersionName(prev.version_number + 1)\n return next_\n\n def __init__(self, version_number: int):\n self.version_number = version_number\n\n # -- VersionName protected interface\n\n def _partial_compare(self, other: VersionName) -> Optional[int]:\n if isinstance(other, SimpleVersionName):\n return 0 if self.version_number == other.version_number else (\n -1 if self.version_number < other.version_number else 1)\n return None\n\n # -- Magic methods\n\n def __hash__(self) -> int:\n return hash(self.version_number)\n\n def __str__(self) -> str:\n return f'v{self.version_number}'"
}
] | from pond.conventions import (
METADATA_DIRNAME,
MANIFEST_FILENAME,
version_data_location,
version_manifest_location,
version_uri,
urijoinpath,
)
from pond.version_name import SimpleVersionName | 744 |
def test_urijoinpath():
joined = urijoinpath('a', 'b/', 'c/')
expected = 'a/b/c'
assert joined == expected
def test_data_location():
|
def test_urijoinpath():
joined = urijoinpath('a', 'b/', 'c/')
expected = 'a/b/c'
assert joined == expected
def test_data_location(): | location = version_data_location('abc/', 'blah.bin') | 2 | 2023-12-24 13:05:58+00:00 | 2k |
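Both tests reduce to the `urijoinpath` contract quoted in the context; copied here small enough to run on its own:

```python
def urijoinpath(*parts: str) -> str:
    """Join URI parts, stripping trailing slashes first (as in the snippet)."""
    return "/".join(part.rstrip("/") for part in parts)

assert urijoinpath("a", "b/", "c/") == "a/b/c"
assert urijoinpath("abc/", "blah.bin") == "abc/blah.bin"
```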
Zitronenjoghurt/Colonaut | src/constants/locale_translator.py | [
{
"identifier": "construct_path",
"path": "src/utils/file_operations.py",
"snippet": "def construct_path(relative_path: str) -> str:\n path_parts = relative_path.split(\"/\")\n absolute_path = os.path.join(ROOT_DIR, *path_parts)\n return absolute_path"
},
{
"identifier": "files_in_directory",
"path": "src/utils/file_operations.py",
"snippet": "def files_in_directory(path: str, suffix: Optional[str] = None) -> list[str]:\n if not os.path.exists(path):\n raise ValueError(f\"Directory {path} does not exist.\")\n \n files = []\n for file in os.listdir(path):\n if suffix is not None:\n if suffix in file:\n files.append(file)\n else:\n files.append(file)\n return files"
},
{
"identifier": "file_to_dict",
"path": "src/utils/file_operations.py",
"snippet": "def file_to_dict(file_path: str) -> dict:\n with open(file_path, 'r', encoding='utf-8') as f:\n data = json.load(f)\n if not isinstance(data, dict):\n raise RuntimeError(\"Deserialized data is not a dictionary.\")\n return data"
},
{
"identifier": "str_to_file",
"path": "src/utils/file_operations.py",
"snippet": "def str_to_file(file_path: str, string: str):\n with open(file_path, 'w', encoding='utf-8') as f:\n f.write(string)"
},
{
"identifier": "Locales",
"path": "src/constants/locales.py",
"snippet": "class Locales:\n # Common\n FAILURE = \"failure\"\n SUCCESS = \"success\"\n\n # Descriptions\n ACCELEROMETER_DESCRIPTION = \"accelerometer_description\"\n BATTERY_DESCRIPTION = \"battery_description\"\n HULL_DESCRIPTION = \"hull_description\"\n INFRARED_SPECTROMETER_DESCRIPTION = \"infrared_spectrometer_description\"\n LASER_ALTIMETER_DESCRIPTION = \"laser_altimeter_description\"\n NEUTRON_DENSITOMETER_DESCRIPTION = \"neutron_densitometer_description\"\n RADIO_TELEMETRY_DESCRIPTION = \"radio_telemetry_description\"\n SOLAR_PANEL_DESCRIPTION = \"solar_panel_description\"\n\n # Messages\n BATTERY_CHARGED_BY = \"battery_charged_by\"\n BATTERY_DISTRIBUTED_ENERGY = \"battery_distributed_energy\"\n BATTERY_FULLY_CHARGED = \"battery_fully_charged\"\n BATTERY_WARNING_NET_NEGATIVE_ENERGY = \"battery_warning_net_negative_energy\"\n SOLAR_PANEL_COLLECTED_ENERGY = \"solar_panel_collected_energy\"\n SOLAR_PANEL_NO_BATTERY = \"solar_panel_no_battery\"\n\n # Names\n ACCELEROMETER = \"accelerometer\"\n BATTERY = \"battery\"\n HULL = \"hull\"\n INFRARED_SPECTROMETER = \"infrared_spectrometer\"\n LASER_ALTIMETER = \"laser_altimeter\"\n NEUTRON_DENSITOMETER = \"neutron_densitometer\"\n RADIO_TELEMETRY = \"radio_telemetry\"\n SOLAR_PANEL = \"solar_panel\"\n\n # Science\n DENSITY = \"density\"\n MASS = \"mass\"\n ORB_PERIOD = \"orb_period\"\n RADIUS = \"radius\"\n ROT_PERIOD = \"rot_period\"\n TEMPERATURE = \"temperature\"\n VOLUME = \"volume\"\n\n # Stats\n CAPACITY = \"capacity\"\n CHARGE_CAPACITY = \"charge_capacity\"\n HEALTH = \"health\"\n MAX_CAPACITY = \"max_capacity\"\n MAX_HP = \"max_hp\"\n POWER = \"power\"\n POWER_USAGE = \"power_usage\"\n REVEAL_CHANCE = \"reveal_chance\"\n SUCCESS_RATE = \"success_rate\"\n\n # UI\n ADDITIONAL_INFORMATION = \"additional_information\"\n INSPIRED_BY_SEEDSHIP = \"inspired_by_seedship\"\n OPTIONS = \"options\"\n QUIT = \"quit\"\n START_GAME = \"start_game\"\n STATS = \"stats\"\n\n @classmethod\n def get_existing_keys(cls) -> list[str]:\n return [getattr(cls, attr) for attr in dir(cls) if not callable(getattr(cls, attr)) and not attr.startswith(\"__\")]"
}
] | from src.utils.file_operations import construct_path, files_in_directory, file_to_dict, str_to_file
from .locales import Locales | 1,010 |
LOCALES_FILE_PATH = construct_path("src/data/locale/{language}/")
OUTPUT_TXT_FILE_PATH = construct_path("locale_{language}.txt")
LANGUAGES = ["en"]
class LocaleTranslator():
_instance = None
|
LOCALES_FILE_PATH = construct_path("src/data/locale/{language}/")
OUTPUT_TXT_FILE_PATH = construct_path("locale_{language}.txt")
LANGUAGES = ["en"]
class LocaleTranslator():
_instance = None | KEYS = Locales | 4 | 2023-12-22 21:24:33+00:00 | 2k |
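The `_instance = None` line opening `LocaleTranslator` is the usual hook for a singleton. One common reading of that pattern, offered as an assumption since the real `__new__` body falls outside this crop:

```python
class Singleton:
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)  # create exactly once
        return cls._instance

assert Singleton() is Singleton()  # every call returns the same object
```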
daojiAnime/aio_retrying | tests/test_condition_error.py | [
{
"identifier": "ConditionError",
"path": "aio_retrying.py",
"snippet": "class ConditionError(Exception):\n pass"
},
{
"identifier": "retry",
"path": "aio_retrying.py",
"snippet": "def retry(\n fn: Callable = None,\n *,\n attempts: int = 0,\n callback: Optional[Callable] = None,\n fallback: Union[Callable, Type[BaseException], Any] = None,\n timeout: Union[int, float] = None,\n delay: int = 0,\n retry_exceptions: Tuple[Type[BaseException]] = (Exception,),\n fatal_exceptions: Tuple[Type[BaseException]] = (asyncio.CancelledError,),\n):\n if fn is None:\n return partial(\n retry,\n attempts=attempts,\n callback=callback,\n fallback=fallback,\n timeout=timeout,\n delay=delay,\n retry_exceptions=retry_exceptions,\n fatal_exceptions=fatal_exceptions,\n )\n\n @wraps(fn)\n def wrapper(*args, **kwargs) -> Coroutine:\n async def wrapped(attempt: int = 0) -> Any:\n if not asyncio.iscoroutinefunction(fn):\n raise ConditionError(\n \"Only support coroutine function\",\n )\n\n if timeout is not None and asyncio.TimeoutError not in retry_exceptions:\n _retry_exceptions = (asyncio.TimeoutError,) + retry_exceptions\n else:\n _retry_exceptions = retry_exceptions\n\n try:\n if timeout is None:\n ret = await fn(*args, **kwargs)\n else:\n with async_timeout.timeout(timeout):\n ret = await fn(*args, **kwargs)\n return ret\n\n except ConditionError:\n raise\n except fatal_exceptions:\n raise\n except _retry_exceptions as exc:\n _attempts = \"infinity\" if attempts is forever else attempts\n logger.debug(\n exc.__class__.__name__ + f\" -> Tried attempt {attempt} from total {attempts} for {fn}\",\n exc_info=exc,\n )\n if attempts is forever or attempt < attempts:\n await asyncio.sleep(delay)\n return await wrapped(attempt=attempt + 1)\n\n ret = None\n if fallback is not None:\n if fallback is propagate:\n raise exc\n\n if is_exception(fallback):\n raise fallback from exc\n\n if callable(fallback):\n if asyncio.iscoroutinefunction(fallback): # noqa\n ret = await fallback(*args, **kwargs)\n else:\n ret = fallback(*args, **kwargs)\n else:\n ret = fallback\n\n if callback is not None:\n if not callable(callback):\n raise ConditionError(\n \"Callback must be callable\",\n )\n if asyncio.iscoroutinefunction(callback):\n await callback(attempt, exc, args, kwargs)\n else:\n callback(attempt, exc, args, kwargs)\n\n return ret\n\n return wrapped()\n\n return wrapper"
}
] | import asyncio
import pytest
from aio_retrying import ConditionError, retry | 745 |
async def test_timeout_is_not_none_and_not_async():
@retry(timeout=0.5)
def not_coro():
pass
|
async def test_timeout_is_not_none_and_not_async():
@retry(timeout=0.5)
def not_coro():
pass
| with pytest.raises(ConditionError): | 0 | 2023-12-30 02:48:40+00:00 | 2k |
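The test above exists because `retry()`'s `wrapped()` raises `ConditionError` unless `asyncio.iscoroutinefunction(fn)` holds. The distinction the guard keys on:

```python
import asyncio

def not_coro():
    pass

async def coro():
    pass

assert not asyncio.iscoroutinefunction(not_coro)  # rejected -> ConditionError
assert asyncio.iscoroutinefunction(coro)          # accepted by the wrapper
```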
xIMRANx/secret_postcard | app/handlers/user/file.py | [
{
"identifier": "User",
"path": "app/db/functions.py",
"snippet": "class User(models.User):\n @classmethod\n async def is_registered(cls, telegram_id: int) -> Union[models.User, bool]:\n try:\n return await cls.get(telegram_id=telegram_id)\n except DoesNotExist:\n return False\n\n @classmethod\n async def is_admin(cls, telegram_id: int) -> bool:\n user = await cls.is_registered(telegram_id)\n if not user:\n return False\n\n if user.role == \"admin\":\n return True\n else:\n return False\n\n @classmethod\n async def register(cls, telegram_id: int, name: str = None) -> None:\n await User(\n telegram_id=telegram_id, name=name, create_date=datetime.now()\n ).save()\n\n @classmethod\n async def get_count(cls) -> int:\n return await cls.all().count()\n\n @classmethod\n async def edit_anonymous(cls, user_id: int, anonymous: bool) -> None:\n await cls.filter(telegram_id=user_id).update(anonymous=anonymous)\n\n @classmethod\n async def get_all_users(cls) -> list[models.User]:\n return await cls.all()"
},
{
"identifier": "Card",
"path": "app/db/functions.py",
"snippet": "class Card(models.Card):\n @classmethod\n async def get_all_card_owners(cls) -> list[models.Card]:\n return await cls.filter(approved=True).values_list(\"owner_id\", flat=True)\n\n @classmethod\n async def get_count(cls) -> int:\n return await cls.all().count()\n\n @classmethod\n async def create_card(\n cls, file_id: str, description: str, owner_id: int, file_type: str = \"photo\"\n ) -> None:\n await Card(\n file_id=file_id,\n description=description,\n owner_id=owner_id,\n file_type=file_type,\n create_date=datetime.now(),\n ).save()\n\n @classmethod\n async def check_exists(cls, user_id: int) -> bool:\n return await cls.filter(owner_id=user_id).exists()\n\n @classmethod\n async def approve(cls, user_id: int) -> None:\n await cls.filter(owner_id=user_id).update(approved=True)\n\n @classmethod\n async def get_card(cls, user_id: int) -> Union[models.Card, bool]:\n try:\n return await cls.get(owner_id=user_id, approved=False)\n except DoesNotExist:\n return False\n\n @classmethod\n async def delete_card(cls, user_id: int) -> None:\n await cls.filter(owner_id=user_id).delete()\n\n @classmethod\n async def get_all_cards(cls) -> list[models.Card]:\n return await cls.filter(approved=True).all()"
},
{
"identifier": "get_approve_keyboard",
"path": "app/keyboards/inline.py",
"snippet": "def get_approve_keyboard(user_id):\n buttons = [\n [InlineKeyboardButton(text=\"✅\", callback_data=f\"approve:{user_id}\")],\n [InlineKeyboardButton(text=\"❌\", callback_data=f\"decline:{user_id}\")],\n ]\n\n keyboard = InlineKeyboardBuilder(markup=buttons)\n return keyboard.as_markup()"
},
{
"identifier": "Config",
"path": "app/config.py",
"snippet": "class Config:\n bot: ConfigBot\n database: ConfigDatabase\n settings: ConfigSettings\n api: ConfigApi\n\n @classmethod\n def parse(cls, data: dict) -> \"Config\":\n sections = {}\n\n for section in fields(cls):\n pre = {}\n current = data[section.name]\n\n for field in fields(section.type):\n if field.name in current:\n pre[field.name] = current[field.name]\n elif field.default is not MISSING:\n pre[field.name] = field.default\n else:\n raise ValueError(\n f\"Missing field {field.name} in section {section.name}\"\n )\n\n sections[section.name] = section.type(**pre)\n\n return cls(**sections)"
}
] | from aiogram import Router, Bot, F
from aiogram.types import Message
from app.db.functions import User
from app.db.functions import Card
from app.keyboards.inline import get_approve_keyboard
from app.config import Config | 1,167 |
router = Router()
@router.message(F.content_type.in_({"photo", "video", "animation"}))
async def get_postcard(message: Message, bot: Bot, config: Config):
if await Card.check_exists(message.from_user.id):
await message.answer("Вы уже отправили свою открытку!")
return
postcard_type = message.content_type
if message.photo is not None:
file_id = message.photo[-1].file_id
elif message.video is not None:
file_id = message.video.file_id
elif message.animation is not None:
file_id = message.animation.file_id
else:
file_id = None
user_id = message.from_user.id
chat_id = config.settings.chat_id
|
router = Router()
@router.message(F.content_type.in_({"photo", "video", "animation"}))
async def get_postcard(message: Message, bot: Bot, config: Config):
if await Card.check_exists(message.from_user.id):
await message.answer("Вы уже отправили свою открытку!")
return
postcard_type = message.content_type
if message.photo is not None:
file_id = message.photo[-1].file_id
elif message.video is not None:
file_id = message.video.file_id
elif message.animation is not None:
file_id = message.animation.file_id
else:
file_id = None
user_id = message.from_user.id
chat_id = config.settings.chat_id | if not await User.is_registered(user_id): | 0 | 2023-12-30 07:57:10+00:00 | 2k |
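The photo/video/animation branches above reduce to one dispatch rule; here it is over a `SimpleNamespace` stand-in for the aiogram `Message` (attribute names mirror the handler):

```python
from types import SimpleNamespace
from typing import Optional

def extract_file_id(message) -> Optional[str]:
    if message.photo is not None:
        return message.photo[-1].file_id  # last entry: highest resolution
    if message.video is not None:
        return message.video.file_id
    if message.animation is not None:
        return message.animation.file_id
    return None

msg = SimpleNamespace(photo=[SimpleNamespace(file_id="ph1")],
                      video=None, animation=None)
assert extract_file_id(msg) == "ph1"
```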
akkoaya/ArticleSpider | ArticleSpider/spiders/cnblog.py | [
{
"identifier": "CnblogItem",
"path": "ArticleSpider/items.py",
"snippet": "class CnblogItem(scrapy.Item):\n url = scrapy.Field()\n url_object_id = scrapy.Field()\n title = scrapy.Field()\n date = scrapy.Field()\n writer_id = scrapy.Field()\n views_num = scrapy.Field()\n comments_num = scrapy.Field()\n main_content = scrapy.Field()\n\n def save_to_es(self):\n cnblog = CnblogPost()\n\n cnblog.url = self['url'][0]\n cnblog.meta.id = self['url_object_id'][0] #设置index的id为url_object_id\n cnblog.title = self['title'][0]\n cnblog.date = self['date'][0]\n cnblog.writer_id = self['writer_id'][0]\n cnblog.views_num = self['views_num'][0]\n cnblog.comments_num = self['comments_num'][0]\n cnblog.main_content = remove_tags(self['main_content'][0])\n cnblog.suggest = get_suggests(\"cnblog\",((cnblog.title, 10),)) #注意set里面只有一个元素的时候必须加个逗号,不然不计算该元素\n\n cnblog.save() #保存\n redis_cli.incr('cnblog_nums')\n return\n\n def get_insert_sql(self):\n\n insert_sql = \"\"\"\n insert into cnblog(url_object_id,url,title,date,writer_id,views_num,comments_num,main_content,)\n values(%s,%s,%s,%s,%s,%s,%s,%s) \n\n \"\"\"\n params = (\n self[\"url_object_id\"][0], self[\"url\"][0], self['title'][0],\n self['date'][0], self['writer_id'][0], self['views_num'][0],\n self['comments_num'][0],self['main_content'][0]\n )\n return insert_sql, params"
},
{
"identifier": "get_md5",
"path": "ArticleSpider/utils/common.py",
"snippet": "def get_md5(url):\n m = hashlib.md5()\n m.update(url.encode(\"utf-8\"))\n return m.hexdigest()"
},
{
"identifier": "RedisSpider",
"path": "scrapy_redis/spiders.py",
"snippet": "class RedisSpider(RedisMixin, Spider):\n \"\"\"Spider that reads urls from redis queue when idle.\n\n Attributes\n ----------\n redis_key : str (default: REDIS_START_URLS_KEY)\n Redis key where to fetch start URLs from..\n redis_batch_size : int (default: CONCURRENT_REQUESTS)\n Number of messages to fetch from redis on each attempt.\n redis_encoding : str (default: REDIS_ENCODING)\n Encoding to use when decoding messages from redis queue.\n\n Settings\n --------\n REDIS_START_URLS_KEY : str (default: \"<spider.name>:start_urls\")\n Default Redis key where to fetch start URLs from..\n REDIS_START_URLS_BATCH_SIZE : int (deprecated by CONCURRENT_REQUESTS)\n Default number of messages to fetch from redis on each attempt.\n REDIS_START_URLS_AS_SET : bool (default: False)\n Use SET operations to retrieve messages from the redis queue. If False,\n the messages are retrieve using the LPOP command.\n REDIS_ENCODING : str (default: \"utf-8\")\n Default encoding to use when decoding messages from redis queue.\n\n \"\"\"\n\n @classmethod\n def from_crawler(cls, crawler, *args, **kwargs):\n obj = super(RedisSpider, cls).from_crawler(crawler, *args, **kwargs)\n obj.setup_redis(crawler)\n return obj"
}
] | import scrapy
import datetime
import re
from scrapy.http import Request
from urllib import parse
from ..items import CnblogItem
from ..utils.common import get_md5
from scrapy.loader import ItemLoader
from scrapy_redis.spiders import RedisSpider | 1,196 |
class CnblogSpider(scrapy.Spider):
name = "cnblog"
allowed_domains = ["www.cnblogs.com"]
start_urls = ["https://www.cnblogs.com/sitehome/p/1"]
# redis_key = 'cnblog:start_urls'
next_url = "https://www.cnblogs.com/sitehome/p/{0}"
# headers = {
# "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
# }
def parse(self, response):
all_urls = response.css('div.post-list a::attr(href)').extract()
all_urls = [parse.urljoin(response.url, url) for url in all_urls]
for url in all_urls:
match_obj = re.match('(.*.cnblogs.com/(.*)/p/.*.html)',url)
if match_obj:
request_url = match_obj.group(1)
writer_id = match_obj.group(2)
yield Request(url=request_url,meta={'writer_id':writer_id},callback=self.parse_detail)
for x in range(2,100):
yield Request(url=self.next_url.format(x), callback=self.parse)
def parse_detail(self,response):
item_loader = ItemLoader(item=CnblogItem(), response=response)
item_loader.add_value("url", response.url)
|
class CnblogSpider(scrapy.Spider):
name = "cnblog"
allowed_domains = ["www.cnblogs.com"]
start_urls = ["https://www.cnblogs.com/sitehome/p/1"]
# redis_key = 'cnblog:start_urls'
next_url = "https://www.cnblogs.com/sitehome/p/{0}"
# headers = {
# "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
# }
def parse(self, response):
all_urls = response.css('div.post-list a::attr(href)').extract()
all_urls = [parse.urljoin(response.url, url) for url in all_urls]
for url in all_urls:
match_obj = re.match('(.*.cnblogs.com/(.*)/p/.*.html)',url)
if match_obj:
request_url = match_obj.group(1)
writer_id = match_obj.group(2)
yield Request(url=request_url,meta={'writer_id':writer_id},callback=self.parse_detail)
for x in range(2,100):
yield Request(url=self.next_url.format(x), callback=self.parse)
def parse_detail(self,response):
item_loader = ItemLoader(item=CnblogItem(), response=response)
item_loader.add_value("url", response.url) | item_loader.add_value("url_object_id", get_md5(response.url)) | 1 | 2023-12-29 15:05:22+00:00 | 2k |
Asa-Nisi-Masa/christmas-tree | christmas_tree/calculations/compute_coords.py | [
{
"identifier": "PATH_SAVE",
"path": "christmas_tree/common/settings.py",
"snippet": "PATH_SAVE = \"coordinates.csv\""
},
{
"identifier": "TOTAL_LEDS",
"path": "christmas_tree/common/settings.py",
"snippet": "TOTAL_LEDS = 500"
}
] | from collections import defaultdict, namedtuple
from pathlib import Path
from typing import Dict, List, Optional
from tqdm import tqdm
from christmas_tree.common.settings import PATH_SAVE, TOTAL_LEDS
import cv2
import numpy as np | 1,251 | contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
centers = []
for contour in contours:
M = cv2.moments(contour)
if M["m00"] != 0:
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
centers.append(Point(cX, cY))
return centers
def _compute_correct_positions(contour_centers: List[Point]) -> Optional[Point]:
if len(contour_centers) == 0:
return None
if len(contour_centers) == 1:
return contour_centers[0]
min_dist = float("inf")
for i in range(len(contour_centers)):
for j in range(i, len(contour_centers)):
if i == j:
continue
xi, yi = contour_centers[i]
xj, yj = contour_centers[j]
dist2 = (xi - xj) ** 2 + (yi - yj) ** 2
if dist2 < min_dist:
min_dist = dist2
if min_dist < MAX_DIST**2:
centers = np.array(contour_centers).mean(axis=0)
return Point(int(centers[0]), int(centers[1]))
return None
def _get_map_from_index_to_position(angle: int) -> Dict[int, Point]:
map_index_to_position = {}
total_errors = 0
for i in range(TOTAL_LEDS):
path = Path("frames") / str(angle) / f"{i}.jpg"
frame = cv2.imread(str(path))
contour_centers = _compute_naive_positions(frame)
center = _compute_correct_positions(contour_centers)
if center is None:
total_errors += 1
map_index_to_position[i] = None
else:
map_index_to_position[i] = _get_uv(center, width, height)
return map_index_to_position
def get_map_index_to_angle_position() -> Dict[int, Dict[int, Point]]:
# map_index_to_angle_position = map from LED index to a map from angle to LED position
angles_to_centers = {}
map_index_to_angle_position = defaultdict(dict)
for angle in tqdm(ANGLES):
map_index_to_position = _get_map_from_index_to_position(angle)
angles_to_centers[angle] = map_index_to_position
for i in range(TOTAL_LEDS):
map_index_to_angle_position[i][angle] = map_index_to_position[i]
return map_index_to_angle_position
def validate_led_positions(map_index_to_angle_position: Dict[int, Dict[int, Point]]) -> None:
total_no_centers = 0
for i in range(TOTAL_LEDS):
num_angles_center_is_defined = sum(el is not None for el in map_index_to_angle_position[i].values())
if num_angles_center_is_defined < 1:
print(f"No center can be found for {i} LED")
total_no_centers += 1
print("Total no LED positions found:", total_no_centers)
def get_frames_to_xyz(map_index_to_angle_position: Dict[int, Dict[int, Point]]) -> Dict[int, tuple]:
# frames_to_xyz = map from LED index to LED position
frames_to_xyz = {}
for i in range(TOTAL_LEDS):
sum_x = 0
sum_z = 0
sum_y = 0
non_nulls = 0
for angle in ANGLES:
radian = np.pi / 180 * angle
center = map_index_to_angle_position[i][angle]
if center is not None:
sum_x += center.x * np.cos(radian)
sum_z += center.x * np.sin(radian)
sum_y += center.y
non_nulls += 1
if non_nulls > 0:
x = 1 / non_nulls * sum_x
z = 1 / non_nulls * sum_z
y = 1 / non_nulls * sum_y
frames_to_xyz[i] = (x, y, z)
else:
frames_to_xyz[i] = None
return frames_to_xyz
def save_to_file(frames_to_xyz: Dict[int, tuple]):
|
### Adjust these three parameters if lots of LEDs cannot be detected
LOWER_THRESHOLD = 135
UPPER_THRESHOLD = 255
MAX_DIST = 40
###
ANGLES = [0, 45, 90, 135, 180, 225, 270, 315]
Point = namedtuple("Point", ["x", "y"])
# get height and width of images from one of the frames
path = Path("frames") / str(ANGLES[0]) / "0.jpg"
frame = cv2.imread(str(path))
height, width, _ = frame.shape
def _get_uv(center: Point, width: int, height: int) -> Point:
px, py = center
u = 2 / width * px - 1
v = -2 / height * py + 1
return Point(u, v)
def _compute_naive_positions(image: np.ndarray) -> List[Point]:
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, LOWER_THRESHOLD, UPPER_THRESHOLD, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
centers = []
for contour in contours:
M = cv2.moments(contour)
if M["m00"] != 0:
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
centers.append(Point(cX, cY))
return centers
def _compute_correct_positions(contour_centers: List[Point]) -> Optional[Point]:
if len(contour_centers) == 0:
return None
if len(contour_centers) == 1:
return contour_centers[0]
min_dist = float("inf")
for i in range(len(contour_centers)):
for j in range(i, len(contour_centers)):
if i == j:
continue
xi, yi = contour_centers[i]
xj, yj = contour_centers[j]
dist2 = (xi - xj) ** 2 + (yi - yj) ** 2
if dist2 < min_dist:
min_dist = dist2
if min_dist < MAX_DIST**2:
centers = np.array(contour_centers).mean(axis=0)
return Point(int(centers[0]), int(centers[1]))
return None
def _get_map_from_index_to_position(angle: int) -> Dict[int, Point]:
map_index_to_position = {}
total_errors = 0
for i in range(TOTAL_LEDS):
path = Path("frames") / str(angle) / f"{i}.jpg"
frame = cv2.imread(str(path))
contour_centers = _compute_naive_positions(frame)
center = _compute_correct_positions(contour_centers)
if center is None:
total_errors += 1
map_index_to_position[i] = None
else:
map_index_to_position[i] = _get_uv(center, width, height)
return map_index_to_position
def get_map_index_to_angle_position() -> Dict[int, Dict[int, Point]]:
# map_index_to_angle_position = map from LED index to a map from angle to LED position
angles_to_centers = {}
map_index_to_angle_position = defaultdict(dict)
for angle in tqdm(ANGLES):
map_index_to_position = _get_map_from_index_to_position(angle)
angles_to_centers[angle] = map_index_to_position
for i in range(TOTAL_LEDS):
map_index_to_angle_position[i][angle] = map_index_to_position[i]
return map_index_to_angle_position
def validate_led_positions(map_index_to_angle_position: Dict[int, Dict[int, Point]]) -> None:
total_no_centers = 0
for i in range(TOTAL_LEDS):
num_angles_center_is_defined = sum(el is not None for el in map_index_to_angle_position[i].values())
if num_angles_center_is_defined < 1:
print(f"No center can be found for {i} LED")
total_no_centers += 1
print("Total no LED positions found:", total_no_centers)
def get_frames_to_xyz(map_index_to_angle_position: Dict[int, Dict[int, Point]]) -> Dict[int, tuple]:
# frames_to_xyz = map from LED index to LED position
frames_to_xyz = {}
for i in range(TOTAL_LEDS):
sum_x = 0
sum_z = 0
sum_y = 0
non_nulls = 0
for angle in ANGLES:
radian = np.pi / 180 * angle
center = map_index_to_angle_position[i][angle]
if center is not None:
sum_x += center.x * np.cos(radian)
sum_z += center.x * np.sin(radian)
sum_y += center.y
non_nulls += 1
if non_nulls > 0:
x = 1 / non_nulls * sum_x
z = 1 / non_nulls * sum_z
y = 1 / non_nulls * sum_y
frames_to_xyz[i] = (x, y, z)
else:
frames_to_xyz[i] = None
return frames_to_xyz
def save_to_file(frames_to_xyz: Dict[int, tuple]): | with open(PATH_SAVE, "w") as file: | 0 | 2023-12-30 12:25:19+00:00 | 2k |
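For this record, the gold next_line opens the output file inside save_to_file. A minimal sketch of how the function might continue, assuming PATH_SAVE is a JSON output path defined elsewhere in the repo (it does not appear in this record):

import json

PATH_SAVE = "led_positions.json"  # assumption: the real constant lives elsewhere in the repo

def save_to_file(frames_to_xyz):
    # Write LED index -> (x, y, z); LEDs with no detected center stay null.
    with open(PATH_SAVE, "w") as file:
        json.dump({str(i): pos for i, pos in frames_to_xyz.items()}, file, indent=2)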
YYJeffrey/july_server | app/api/v2/message.py | [
{
"identifier": "auth",
"path": "app/lib/token.py",
"snippet": "def verify_token(token):\ndef generate_token(user_id):"
},
{
"identifier": "db",
"path": "app/model/base.py",
"snippet": "class BaseModel(db.Model):\n def __getitem__(self, key):\n def init_on_load(self):\n def __set_fields(self):\n def _set_fields(self):\n def keys(self):\n def hide(self, *keys):\n def append(self, *keys):\n def status(self):\n def get_or_404(cls, **kwargs):\n def all_or_404(cls, **kwargs):\n def get_one(cls, **kwargs):\n def get_all(cls, **kwargs):\n def create(cls, commit: bool = True, **kwargs):\n def update(self, commit: bool = True, **kwargs):\n def save(self, commit: bool = True):\n def delete(self, commit: bool = True, soft: bool = True):\n def get_pagination(cls, not_del: bool = True, **kwargs):"
},
{
"identifier": "Success",
"path": "app/lib/exception.py",
"snippet": "class Success(APIException):\n code = 200\n msg_code = 0\n msg = '成功'"
},
{
"identifier": "Updated",
"path": "app/lib/exception.py",
"snippet": "class Updated(APIException):\n code = 200\n msg_code = 2\n msg = '更新成功'"
},
{
"identifier": "RedPrint",
"path": "app/lib/red_print.py",
"snippet": "class RedPrint(object):\n \"\"\"\n 红图用于嵌套路由使用\n \"\"\"\n\n def __init__(self, name):\n self.name = name\n self.mound = []\n\n def route(self, rule, **options):\n def decorator(func):\n if 'strict_slashes' not in options:\n options['strict_slashes'] = False\n self.mound.append((func, rule, options))\n return func\n\n return decorator\n\n def register(self, bp, url_prefix=None):\n if url_prefix is None:\n url_prefix = f\"/{self.name}\"\n\n for func, rule, options in self.mound:\n endpoint = f\"{self.name}/{options.pop('endpoint', func.__name__)}\"\n bp.add_url_rule(url_prefix + rule, endpoint, func, **options)"
},
{
"identifier": "Message",
"path": "app/model/message.py",
"snippet": "class Message(BaseModel):\n \"\"\"\n 消息模型\n \"\"\"\n __tablename__ = 'message'\n\n content = Column(String(256), nullable=False, comment='内容')\n category = Column(Enum(MessageCategory), default=MessageCategory.COMMENT, comment='类型')\n is_read = Column(Boolean, default=False, comment='是否已读')\n is_anon = Column(Boolean, default=False, comment='是否匿名')\n user_id = Column(String(32), nullable=False, index=True, comment='用户标识')\n action_user_id = Column(String(32), nullable=False, index=True, comment='发起用户标识')\n topic_id = Column(String(32), index=True, comment='话题标识')\n\n def __str__(self):\n return self.content\n\n def _set_fields(self):\n self.append('push_time')\n self._exclude.extend(['action_user_id'])\n\n @property\n def push_time(self):\n \"\"\"\n 发布时间\n \"\"\"\n if self.create_time is not None:\n return datetime_to_hint(self.create_time)\n return None"
},
{
"identifier": "get_message_list",
"path": "app/service/message.py",
"snippet": "def get_message_list():\n \"\"\"\n 查询消息列表\n \"\"\"\n action_user = aliased(User)\n\n data = db.session.query(Message, User, action_user, Topic) \\\n .outerjoin(User, Message.user_id == User.id) \\\n .outerjoin(action_user, Message.action_user_id == action_user.id) \\\n .outerjoin(Topic, Message.topic_id == Topic.id) \\\n .filter(Message.user_id == g.user.id) \\\n .filter(Message.is_read.is_(False)) \\\n .filter(Message.delete_time.is_(None)) \\\n .all()\n\n for index, (message, _, message.action_user, message.topic) in enumerate(data):\n if message.topic is not None:\n if message.topic.is_anon and g.user.id != message.topic.user_id:\n message.topic.user = None\n else:\n message.topic.user = User.get_one(id=message.topic.user_id)\n if message.topic.video_id is not None:\n message.topic.video = Video.get_one(id=message.topic.video_id)\n else:\n message.topic.video = None\n message.topic.append('user', 'video')\n\n if message.is_anon:\n message.action_user = None\n\n message.append('action_user', 'topic')\n data[index] = message\n\n return data"
}
] | from flask import g
from app import auth, db
from app.lib.exception import Success, Updated
from app.lib.red_print import RedPrint
from app.model.message import Message
from app.service.message import get_message_list | 1,304 | # -*- coding: utf-8 -*-
"""
:copyright: (c) 2023 by Jeffrey.
:license: Apache 2.0, see LICENSE for more details.
"""
api = RedPrint('message')
@api.route('/', methods=['GET'])
@auth.login_required
def get_messages():
"""
Get messages
"""
messages = get_message_list()
return Success(data=messages)
@api.route('/read', methods=['POST'])
@auth.login_required
def read_messages():
"""
Mark messages as read
"""
with db.auto_commit():
db.session.query(Message).filter_by(user_id=g.user.id, is_read=False).update({Message.is_read: True})
| # -*- coding: utf-8 -*-
"""
:copyright: (c) 2023 by Jeffrey.
:license: Apache 2.0, see LICENSE for more details.
"""
api = RedPrint('message')
@api.route('/', methods=['GET'])
@auth.login_required
def get_messages():
"""
Get messages
"""
messages = get_message_list()
return Success(data=messages)
@api.route('/read', methods=['POST'])
@auth.login_required
def read_messages():
"""
Mark messages as read
"""
with db.auto_commit():
db.session.query(Message).filter_by(user_id=g.user.id, is_read=False).update({Message.is_read: True})
| return Updated() | 3 | 2023-12-30 04:08:35+00:00 | 2k |
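The gold next_line here is `return Updated()`, which closes read_messages with the "update succeeded" schema. For context, a self-contained sketch of how the RedPrint helper from the snippet above nests routes under a Flask blueprint; RedPrint is re-declared from that snippet so the demo runs standalone, and the view body is a placeholder:

from flask import Flask, Blueprint

class RedPrint:
    def __init__(self, name):
        self.name = name
        self.mound = []

    def route(self, rule, **options):
        def decorator(func):
            options.setdefault('strict_slashes', False)
            self.mound.append((func, rule, options))
            return func
        return decorator

    def register(self, bp, url_prefix=None):
        if url_prefix is None:
            url_prefix = f"/{self.name}"
        for func, rule, options in self.mound:
            endpoint = f"{self.name}/{options.pop('endpoint', func.__name__)}"
            bp.add_url_rule(url_prefix + rule, endpoint, func, **options)

api = RedPrint('message')

@api.route('/', methods=['GET'])
def get_messages():
    return {'data': []}  # stands in for Success(data=messages)

bp = Blueprint('api_v2', __name__)
api.register(bp)                      # mounts the red print under /message
app = Flask(__name__)
app.register_blueprint(bp, url_prefix='/v2')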
lchen1019/Image_Cropper | ISAT/widgets/polygon.py | [
{
"identifier": "Object",
"path": "ISAT/annotation.py",
"snippet": "class Object:\n def __init__(self, category:str, group:int, segmentation, area, layer, bbox, iscrowd=0, note=''):\n self.category = category\n self.group = group\n self.segmentation = segmentation\n self.area = area\n self.layer = layer\n self.bbox = bbox\n self.iscrowd = iscrowd\n self.note = note"
},
{
"identifier": "STATUSMode",
"path": "ISAT/configs.py",
"snippet": "class STATUSMode(Enum):\n VIEW = 0\n CREATE = 1\n EDIT = 2"
},
{
"identifier": "CLICKMode",
"path": "ISAT/configs.py",
"snippet": "class CLICKMode(Enum):\n POSITIVE = 0\n NEGATIVE = 1"
},
{
"identifier": "DRAWMode",
"path": "ISAT/configs.py",
"snippet": "class DRAWMode(Enum):\n POLYGON = 0\n SEGMENTANYTHING = 1"
},
{
"identifier": "CONTOURMode",
"path": "ISAT/configs.py",
"snippet": "class CONTOURMode(Enum):\n SAVE_MAX_ONLY = 0 # 只保留最多顶点的mask(一般为最大面积)\n SAVE_EXTERNAL = 1 # 只保留外轮廓\n SAVE_ALL = 2 # 保留所有轮廓"
}
] | from PyQt5 import QtCore, QtWidgets, QtGui
from ISAT.annotation import Object
from ISAT.configs import STATUSMode, CLICKMode, DRAWMode, CONTOURMode
import typing | 1,085 | # -*- coding: utf-8 -*-
# @Author : LG
class PromptPoint(QtWidgets.QGraphicsPathItem):
def __init__(self, pos, type=0):
super(PromptPoint, self).__init__()
self.color = QtGui.QColor('#0000FF') if type==0 else QtGui.QColor('#00FF00')
self.color.setAlpha(255)
self.painterpath = QtGui.QPainterPath()
self.painterpath.addEllipse(
QtCore.QRectF(-1, -1, 2, 2))
self.setPath(self.painterpath)
self.setBrush(self.color)
self.setPen(QtGui.QPen(self.color, 3))
self.setZValue(1e5)
self.setPos(pos)
class Vertex(QtWidgets.QGraphicsPathItem):
def __init__(self, polygon, color, nohover_size=2):
super(Vertex, self).__init__()
self.polygon = polygon
self.color = color
self.color.setAlpha(255)
self.nohover_size = nohover_size
self.hover_size = self.nohover_size + 2
self.line_width = 0
self.nohover = QtGui.QPainterPath()
self.nohover.addEllipse(QtCore.QRectF(-self.nohover_size//2, -self.nohover_size//2, self.nohover_size, self.nohover_size))
self.hover = QtGui.QPainterPath()
self.hover.addRect(QtCore.QRectF(-self.nohover_size//2, -self.nohover_size//2, self.nohover_size, self.nohover_size))
self.setPath(self.nohover)
self.setBrush(self.color)
self.setPen(QtGui.QPen(self.color, self.line_width))
self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)
self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemIsMovable, True)
self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemSendsGeometryChanges, True)
self.setAcceptHoverEvents(True)
self.setZValue(1e5)
def setColor(self, color):
self.color = QtGui.QColor(color)
self.color.setAlpha(255)
self.setPen(QtGui.QPen(self.color, self.line_width))
self.setBrush(self.color)
def itemChange(self, change: 'QtWidgets.QGraphicsItem.GraphicsItemChange', value: typing.Any):
if change == QtWidgets.QGraphicsItem.GraphicsItemChange.ItemSelectedHasChanged:
self.scene().mainwindow.actionDelete.setEnabled(self.isSelected())
if self.isSelected():
selected_color = QtGui.QColor('#00A0FF')
self.setBrush(selected_color)
else:
self.setBrush(self.color)
if change == QtWidgets.QGraphicsItem.GraphicsItemChange.ItemPositionChange and self.isEnabled():
# Keep the vertex from being dragged outside the image
if value.x() < 0:
value.setX(0)
if value.x() > self.scene().width()-1:
value.setX(self.scene().width()-1)
if value.y() < 0:
value.setY(0)
if value.y() > self.scene().height()-1:
value.setY(self.scene().height()-1)
index = self.polygon.vertexs.index(self)
self.polygon.movePoint(index, value)
return super(Vertex, self).itemChange(change, value)
def hoverEnterEvent(self, event: 'QGraphicsSceneHoverEvent'):
| # -*- coding: utf-8 -*-
# @Author : LG
class PromptPoint(QtWidgets.QGraphicsPathItem):
def __init__(self, pos, type=0):
super(PromptPoint, self).__init__()
self.color = QtGui.QColor('#0000FF') if type==0 else QtGui.QColor('#00FF00')
self.color.setAlpha(255)
self.painterpath = QtGui.QPainterPath()
self.painterpath.addEllipse(
QtCore.QRectF(-1, -1, 2, 2))
self.setPath(self.painterpath)
self.setBrush(self.color)
self.setPen(QtGui.QPen(self.color, 3))
self.setZValue(1e5)
self.setPos(pos)
class Vertex(QtWidgets.QGraphicsPathItem):
def __init__(self, polygon, color, nohover_size=2):
super(Vertex, self).__init__()
self.polygon = polygon
self.color = color
self.color.setAlpha(255)
self.nohover_size = nohover_size
self.hover_size = self.nohover_size + 2
self.line_width = 0
self.nohover = QtGui.QPainterPath()
self.nohover.addEllipse(QtCore.QRectF(-self.nohover_size//2, -self.nohover_size//2, self.nohover_size, self.nohover_size))
self.hover = QtGui.QPainterPath()
self.hover.addRect(QtCore.QRectF(-self.nohover_size//2, -self.nohover_size//2, self.nohover_size, self.nohover_size))
self.setPath(self.nohover)
self.setBrush(self.color)
self.setPen(QtGui.QPen(self.color, self.line_width))
self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)
self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemIsMovable, True)
self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemSendsGeometryChanges, True)
self.setAcceptHoverEvents(True)
self.setZValue(1e5)
def setColor(self, color):
self.color = QtGui.QColor(color)
self.color.setAlpha(255)
self.setPen(QtGui.QPen(self.color, self.line_width))
self.setBrush(self.color)
def itemChange(self, change: 'QtWidgets.QGraphicsItem.GraphicsItemChange', value: typing.Any):
if change == QtWidgets.QGraphicsItem.GraphicsItemChange.ItemSelectedHasChanged:
self.scene().mainwindow.actionDelete.setEnabled(self.isSelected())
if self.isSelected():
selected_color = QtGui.QColor('#00A0FF')
self.setBrush(selected_color)
else:
self.setBrush(self.color)
if change == QtWidgets.QGraphicsItem.GraphicsItemChange.ItemPositionChange and self.isEnabled():
# Keep the vertex from being dragged outside the image
if value.x() < 0:
value.setX(0)
if value.x() > self.scene().width()-1:
value.setX(self.scene().width()-1)
if value.y() < 0:
value.setY(0)
if value.y() > self.scene().height()-1:
value.setY(self.scene().height()-1)
index = self.polygon.vertexs.index(self)
self.polygon.movePoint(index, value)
return super(Vertex, self).itemChange(change, value)
def hoverEnterEvent(self, event: 'QGraphicsSceneHoverEvent'): | if self.scene().mode == STATUSMode.CREATE: # CREATE | 1 | 2023-12-24 16:19:16+00:00 | 2k |
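The gold next_line guards on the scene mode. A hedged sketch of how the hover handlers on Vertex might continue; everything after the first guard line is an assumption inferred from the hover/nohover paths built in __init__:

def hoverEnterEvent(self, event: 'QGraphicsSceneHoverEvent'):
    if self.scene().mode == STATUSMode.CREATE:  # gold next_line: no hover effect while drawing
        return
    # Assumed behaviour: swap to the larger hover shape so the vertex is easier to grab.
    self.setPath(self.hover)
    super(Vertex, self).hoverEnterEvent(event)

def hoverLeaveEvent(self, event: 'QGraphicsSceneHoverEvent'):
    self.setPath(self.nohover)
    super(Vertex, self).hoverLeaveEvent(event)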
aoki-h-jp/crypto-listed-detector | crypto_listed_detector/detector.py | [
{
"identifier": "BinanceFetch",
"path": "crypto_listed_detector/fetchapi/binance.py",
"snippet": "class BinanceFetch:\n _BASE_URL = \"https://fapi.binance.com\"\n\n def __init__(self):\n pass\n\n def get_linear_ticker(self):\n url = self._BASE_URL + \"/fapi/v1/exchangeInfo\"\n response = requests.get(url)\n return response.json()\n\n def get_all_linear_symbols(self):\n return [item[\"symbol\"] for item in self.get_linear_ticker()[\"symbols\"]]"
},
{
"identifier": "BitgetFetch",
"path": "crypto_listed_detector/fetchapi/bitget.py",
"snippet": "class BitgetFetch:\n _BASE_URL = \"https://api.bitget.com\"\n\n def __init__(self):\n pass\n\n def get_linear_ticker(self):\n url = self._BASE_URL + \"/api/v2/mix/market/tickers?productType=USDT-FUTURES\"\n response = requests.get(url)\n return response.json()\n\n def get_all_linear_symbols(self):\n return [item[\"symbol\"] for item in self.get_linear_ticker()[\"data\"]]"
},
{
"identifier": "BybitFetch",
"path": "crypto_listed_detector/fetchapi/bybit.py",
"snippet": "class BybitFetch:\n _BASE_URL = \"https://api.bybit.com\"\n\n def __init__(self):\n pass\n\n def get_linear_ticker(self):\n url = self._BASE_URL + \"/v5/market/tickers?category=linear\"\n response = requests.get(url)\n return response.json()\n\n def get_all_linear_symbols(self):\n return [item[\"symbol\"] for item in self.get_linear_ticker()[\"result\"][\"list\"]]"
},
{
"identifier": "GateioFetch",
"path": "crypto_listed_detector/fetchapi/gateio.py",
"snippet": "class GateioFetch:\n _BASE_URL = \"https://api.gateio.ws\"\n\n def __init__(self):\n pass\n\n def get_contracts(self):\n url = self._BASE_URL + \"/api/v4/futures/usdt/contracts\"\n response = requests.get(url)\n return response.json()\n\n def get_all_linear_symbols(self):\n return [item[\"name\"] for item in self.get_contracts()]"
},
{
"identifier": "KucoinFetch",
"path": "crypto_listed_detector/fetchapi/kucoin.py",
"snippet": "class KucoinFetch:\n _BASE_URL = \"https://api-futures.kucoin.com\"\n\n def __init__(self):\n pass\n\n def get_linear_ticker(self):\n url = self._BASE_URL + \"/api/v1/contracts/active\"\n response = requests.get(url)\n return response.json()\n\n def get_all_linear_symbols(self):\n return [item[\"symbol\"] for item in self.get_linear_ticker()[\"data\"]]"
},
{
"identifier": "MexcFetch",
"path": "crypto_listed_detector/fetchapi/mexc.py",
"snippet": "class MexcFetch:\n _BASE_URL = \"https://contract.mexc.com\"\n\n def __init__(self):\n pass\n\n def get_risk_reverse(self):\n url = self._BASE_URL + \"/api/v1/contract/risk_reverse\"\n response = requests.get(url)\n return response.json()\n\n def get_all_linear_symbols(self):\n return [item[\"symbol\"] for item in self.get_risk_reverse()[\"data\"]]"
},
{
"identifier": "OkxFetch",
"path": "crypto_listed_detector/fetchapi/okx.py",
"snippet": "class OkxFetch:\n _BASE_URL = \"https://www.okx.com\"\n\n def __init__(self):\n pass\n\n def get_linear_ticker(self):\n url = self._BASE_URL + \"/api/v5/public/instruments?instType=SWAP\"\n response = requests.get(url)\n return response.json()\n\n def get_all_linear_symbols(self):\n return [item[\"instId\"] for item in self.get_linear_ticker()[\"data\"]]"
},
{
"identifier": "PhemexFetch",
"path": "crypto_listed_detector/fetchapi/phemex.py",
"snippet": "class PhemexFetch:\n _BASE_URL = \"https://api.phemex.com\"\n\n def __init__(self):\n pass\n\n def get_linear_products(self):\n url = self._BASE_URL + \"/public/products\"\n response = requests.get(url)\n return response.json()\n\n def get_all_linear_symbols(self):\n return [\n item[\"symbol\"] for item in self.get_linear_products()[\"data\"][\"products\"]\n ]"
},
{
"identifier": "PionexFetch",
"path": "crypto_listed_detector/fetchapi/pionex.py",
"snippet": "class PionexFetch:\n _BASE_URL = \"https://api.pionex.com\"\n\n def __init__(self):\n pass\n\n def get_linear_symbols(self):\n url = self._BASE_URL + \"/api/v1/common/symbols\"\n response = requests.get(url)\n return response.json()\n\n def get_all_linear_symbols(self):\n return [item[\"symbol\"] for item in self.get_linear_symbols()[\"data\"][\"symbols\"]]"
},
{
"identifier": "XtcomFetch",
"path": "crypto_listed_detector/fetchapi/xtcom.py",
"snippet": "class XtcomFetch:\n _BASE_URL = \"https://fapi.xt.com\"\n\n def __init__(self):\n pass\n\n def get_linear_ticker(self):\n url = self._BASE_URL + \"/future/market/v1/public/cg/contracts\"\n response = requests.get(url)\n return response.json()\n\n def get_all_linear_symbols(self):\n return [item[\"symbol\"] for item in self.get_linear_ticker()]"
}
] | import json
from crypto_listed_detector.fetchapi.binance import BinanceFetch
from crypto_listed_detector.fetchapi.bitget import BitgetFetch
from crypto_listed_detector.fetchapi.bybit import BybitFetch
from crypto_listed_detector.fetchapi.gateio import GateioFetch
from crypto_listed_detector.fetchapi.kucoin import KucoinFetch
from crypto_listed_detector.fetchapi.mexc import MexcFetch
from crypto_listed_detector.fetchapi.okx import OkxFetch
from crypto_listed_detector.fetchapi.phemex import PhemexFetch
from crypto_listed_detector.fetchapi.pionex import PionexFetch
from crypto_listed_detector.fetchapi.xtcom import XtcomFetch | 1,437 | """
crypto-listed-detector
"""
class Detector:
def __init__(self):
"""
Init all fetchers
"""
| """
crypto-listed-detector
"""
class Detector:
def __init__(self):
"""
Init all fetchers
""" | self.bybit = BybitFetch() | 2 | 2023-12-27 10:39:18+00:00 | 2k |
harvestingmoon/StableVisionBot | bot.py | [
{
"identifier": "BackEnd",
"path": "backend.py",
"snippet": "class BackEnd:\n def __init__(self,model_id) -> None:\n self.model = None\n self.curr_picture = None \n self.final_img = None\n self.call = {1:False,2:False}\n self.model_id = (model_id if model_id else \"stabilityai/stable-diffusion-2\")\n def change_picture(self,array): # picture received from user is a byte array need to convert into image \n picture = io.BytesIO(array)\n image = Image.open(picture).convert(\"RGB\")\n self.curr_picture = image # store it temp \n def final_(self,img):\n self.final_img = img\n def get_final(self):\n return self.final_img\n def get_picture(self):\n return self.curr_picture\n def change_model(self,model):\n self.model = model\n def get_model(self):\n return self.model\n def get_call(self):\n return self.call\n def call_engine(self,type):\n model_id = self.model_id\n call = self.get_call()\n device = (\"cuda\" if torch.cuda.is_available() else \"cpu\")\n if not call[type]:\n if True in list(call.values()):\n for k,v in call.items():\n if v == True:\n call[k] = False\n if type == 1:\n scheduler = DDIMScheduler.from_pretrained(model_id,subfolder = \"scheduler\")\n pipe = StableDiffusionPipeline.from_pretrained(model_id,scheduler= scheduler, torch_dtype = torch.float16)\n else:\n pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id,torch_dtype = torch.float16)\n pipe = pipe.to(device)\n self.model = pipe\n call[type] = True\n return self.get_model()"
},
{
"identifier": "post_process",
"path": "backend.py",
"snippet": "def post_process(image,to_doc = True):\n def resize_image(image, max_size):\n quality = 95\n while True:\n with io.BytesIO() as file:\n image.save(file, format='JPEG', quality=quality)\n size = file.tell() / 1024 # Size in KB\n if size <= max_size:\n break\n quality -= 5 # Decrease quality by 5. You can change it as needed.\n if quality < 0:\n raise Exception(\"Cannot reduce image size under the limit without losing too much quality.\")\n return image\n \n def enforce_ratio(image,max_ratio): # stick to 20; 1\n width, height = image.size\n ratio = width / height\n\n if ratio > max_ratio:\n new_width = height * max_ratio\n image = image.resize((int(new_width), height), Image.ANTIALIAS)\n elif ratio < 1 / max_ratio:\n new_height = width * max_ratio\n image = image.resize((width, int(new_height)), Image.ANTIALIAS)\n\n return image\n\n def limit_pixels(image, max_pixels):\n width, height = image.size\n current_pixels = width * height\n\n if current_pixels > max_pixels:\n # Calculate the scale factor\n scale_factor = (max_pixels / current_pixels) ** 0.5\n new_width = int(width * scale_factor)\n new_height = int(height * scale_factor)\n image = image.resize((new_width, new_height), Image.ANTIALIAS)\n\n return image\n\n def pil_to_file(image):\n file = io.BytesIO()\n if to_doc:\n image.save(file, format='PDF')\n else:\n image.save(file,format = 'JPG')\n file.seek(0)\n return file\n if not to_doc:\n image = resize_image(image, 9 * 1024)\n image = enforce_ratio(image,18)\n image = limit_pixels(image, 8000)\n image = pil_to_file(image)\n return image"
}
] | from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, Update,InlineKeyboardButton,InlineKeyboardMarkup
from telegram.ext import (
Application,
CommandHandler,
ContextTypes,
ConversationHandler,
MessageHandler,
CallbackQueryHandler,
filters,
CallbackContext,
)
from backend import BackEnd,post_process
from PIL import Image
import numpy as np
import json
import logging
import yaml
import emoji
import asyncio | 1,161 | # Simple telegram bot that uses stable diffusion
''' Importing YAML'''
with open("config .yaml", "r") as f:
config = yaml.safe_load(f)
model = config['model']
api_key = config['API_KEY']
''' States for bot'''
ONE,TWO,DOCUMENT,PHOTO = range(4)
START,T2IMG,T2IMG2,IMG2IMG,IMG2IMG2,OUTPUT= range(6)
''' User logging'''
logging.basicConfig(
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level = logging.INFO
)
logger = logging.getLogger(__name__)
''' Important pipeline for stable diffusion'''
| # Simple telegram bot that uses stable diffusion
''' Importing YAML'''
with open("config .yaml", "r") as f:
config = yaml.safe_load(f)
model = config['model']
api_key = config['API_KEY']
''' States for bot'''
ONE,TWO,DOCUMENT,PHOTO = range(4)
START,T2IMG,T2IMG2,IMG2IMG,IMG2IMG2,OUTPUT= range(6)
''' User logging'''
logging.basicConfig(
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level = logging.INFO
)
logger = logging.getLogger(__name__)
''' Important pipeline for stable diffusion''' | engine = BackEnd(model) | 0 | 2023-12-22 07:25:26+00:00 | 2k |
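The gold next_line builds the diffusion backend. A short usage sketch based on the BackEnd snippet above, where `call_engine(1)` lazily builds the text-to-image pipeline and `call_engine(2)` the img2img one:

engine = BackEnd(model)       # gold next_line
pipe = engine.call_engine(1)  # text-to-image pipeline, moved to CUDA when available
# engine.call_engine(2) would tear this down and swap in the img2img pipeline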
khabbazan/Mattermost-Subscriptions | apps/chat/gql/subscriptions.py | [
{
"identifier": "MessageQueryType",
"path": "apps/chat/gql/types.py",
"snippet": "class MessageQueryType(graphene.ObjectType):\n \"\"\"\n GraphQL type representing a message in a chat system.\n \"\"\"\n\n id = graphene.String(description=\"Unique identifier of the message.\")\n\n def resolve_id(root, info):\n \"\"\"Resolve the message ID.\"\"\"\n return root[\"id\"]\n\n message = graphene.String(description=\"Content of the message.\")\n\n def resolve_message(root, info):\n \"\"\"Resolve the message content, with special handling for system messages.\"\"\"\n if root[\"type\"] == \"system_join_team\":\n return \"Welcome\"\n return root[\"message\"]\n\n create_at = graphene.String(description=\"Timestamp when the message was created.\")\n\n def resolve_create_at(root, info):\n \"\"\"Resolve the creation timestamp of the message.\"\"\"\n return root[\"create_at\"]\n\n owner = graphene.Field(UserQueryType, description=\"User who sent the message.\")\n\n def resolve_owner(root, info):\n \"\"\"Resolve the owner (sender) of the message.\"\"\"\n if isinstance(info.context, WSGIRequest) or isinstance(info.context, ASGIRequest):\n return User.objects.filter(username=root[\"username\"]).first()\n else:\n return User.objects.filter(username=root[\"username\"]).afirst()\n\n type = graphene.String(description=\"Type of the message, e.g., 'text', 'image', 'system_join_team'.\")\n\n def resolve_type(root, info):\n \"\"\"Resolve the type of the message.\"\"\"\n return root[\"type\"]"
},
{
"identifier": "subscription",
"path": "helpers/channels_graphql_ws/subscription.py",
"snippet": "LOG = logging.getLogger(__name__)\nclass Subscription(graphene.ObjectType):\nclass SubscriptionOptions(graphene.types.objecttype.ObjectTypeOptions):\n def broadcast(cls, *, group=None, payload=None):\n async def broadcast_async(cls, *, group=None, payload=None):\n def broadcast_sync(cls, *, group=None, payload=None):\n def unsubscribe(cls, *, group=None):\n async def unsubscribe_async(cls, *, group=None):\n def unsubscribe_sync(cls, *, group=None):\n def Field(cls, name=None, description=None, deprecation_reason=None, required=False): # noqa\n def __init_subclass_with_meta__(\n cls,\n subscribe=None,\n publish=None,\n unsubscribed=None,\n output=None,\n arguments=None,\n _meta=None,\n **options,\n ): # pylint: disable=arguments-renamed\n def _group_name(cls, group=None):\n def _channel_layer(cls):"
}
] | import graphene
from apps.chat.gql.types import MessageQueryType
from helpers.channels_graphql_ws import subscription | 652 |
class OnNewChatMessage(subscription.Subscription):
"""
GraphQL Subscription for new chat messages.
This subscription allows clients to listen for new messages on a specified channel.
"""
channel_identifier = graphene.String()
|
class OnNewChatMessage(subscription.Subscription):
"""
GraphQL Subscription for new chat messages.
This subscription allows clients to listen for new messages on a specified channel.
"""
channel_identifier = graphene.String() | message = graphene.Field(MessageQueryType) | 0 | 2023-12-25 11:40:56+00:00 | 2k |
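The gold next_line adds the message output field. A hedged sketch of the rest of the subscription class; the Arguments block, the group-name scheme, and both handler bodies are assumptions built on the Subscription base shown in the snippet, whose meta accepts `subscribe` and `publish` hooks:

class OnNewChatMessage(subscription.Subscription):
    """GraphQL Subscription for new chat messages."""

    channel_identifier = graphene.String()
    message = graphene.Field(MessageQueryType)  # gold next_line

    class Arguments:
        channel_identifier = graphene.String(required=True)

    @staticmethod
    def subscribe(root, info, channel_identifier):
        # Group name is an assumption; the broadcasting side must use the same string.
        return [f"chat_{channel_identifier}"]

    @staticmethod
    def publish(payload, info, channel_identifier):
        return OnNewChatMessage(channel_identifier=channel_identifier,
                                message=payload.get("message"))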
Hatins/DEOE | models/detection/yolox_extension/models/yolo_pafpn.py | [
{
"identifier": "BaseConv",
"path": "models/detection/yolox/models/network_blocks.py",
"snippet": "class BaseConv(nn.Module):\n \"\"\"A Conv2d -> Batchnorm -> silu/leaky relu block\"\"\"\n\n def __init__(\n self, in_channels, out_channels, ksize, stride, groups=1, bias=False, act=\"silu\"\n ):\n super().__init__()\n # same padding\n pad = (ksize - 1) // 2\n self.conv = nn.Conv2d(\n in_channels,\n out_channels,\n kernel_size=ksize,\n stride=stride,\n padding=pad,\n groups=groups,\n bias=bias,\n )\n self.bn = nn.BatchNorm2d(out_channels)\n self.act = get_activation(act, inplace=True)\n\n def forward(self, x):\n return self.act(self.bn(self.conv(x)))\n\n def fuseforward(self, x):\n return self.act(self.conv(x))"
},
{
"identifier": "CSPLayer",
"path": "models/detection/yolox/models/network_blocks.py",
"snippet": "class CSPLayer(nn.Module):\n \"\"\"C3 in yolov5, CSP Bottleneck with 3 convolutions\"\"\"\n\n def __init__(\n self,\n in_channels,\n out_channels,\n n=1,\n shortcut=True,\n expansion=0.5,\n depthwise=False,\n act=\"silu\",\n ):\n \"\"\"\n Args:\n in_channels (int): input channels.\n out_channels (int): output channels.\n n (int): number of Bottlenecks. Default value: 1.\n \"\"\"\n # ch_in, ch_out, number, shortcut, groups, expansion\n super().__init__()\n hidden_channels = int(out_channels * expansion) # hidden channels\n self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)\n self.conv2 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)\n self.conv3 = BaseConv(2 * hidden_channels, out_channels, 1, stride=1, act=act)\n module_list = [\n Bottleneck(\n hidden_channels, hidden_channels, shortcut, 1.0, depthwise, act=act\n )\n for _ in range(n)\n ]\n self.m = nn.Sequential(*module_list)\n\n def forward(self, x):\n x_1 = self.conv1(x)\n x_2 = self.conv2(x)\n x_1 = self.m(x_1)\n x = torch.cat((x_1, x_2), dim=1)\n return self.conv3(x)"
},
{
"identifier": "DWConv",
"path": "models/detection/yolox/models/network_blocks.py",
"snippet": "class DWConv(nn.Module):\n \"\"\"Depthwise Conv + Conv\"\"\"\n\n def __init__(self, in_channels, out_channels, ksize, stride=1, act=\"silu\"):\n super().__init__()\n self.dconv = BaseConv(\n in_channels,\n in_channels,\n ksize=ksize,\n stride=stride,\n groups=in_channels,\n act=act,\n )\n self.pconv = BaseConv(\n in_channels, out_channels, ksize=1, stride=1, groups=1, act=act\n )\n\n def forward(self, x):\n x = self.dconv(x)\n return self.pconv(x)"
},
{
"identifier": "BackboneFeatures",
"path": "data/utils/types.py",
"snippet": "class DataType(Enum):\nclass DatasetType(Enum):\nclass DatasetMode(Enum):\nclass DatasetSamplingMode(StrEnum):\nclass ObjDetOutput(Enum):\n EV_REPR = auto()\n FLOW = auto()\n IMAGE = auto()\n OBJLABELS = auto()\n OBJLABELS_SEQ = auto()\n IS_PADDED_MASK = auto()\n IS_FIRST_SAMPLE = auto()\n TOKEN_MASK = auto()\n GEN1 = auto()\n GEN4 = auto()\n TRAIN = auto()\n VALIDATION = auto()\n TESTING = auto()\n RANDOM = 'random'\n STREAM = 'stream'\n MIXED = 'mixed'\n LABELS_PROPH = auto()\n PRED_PROPH = auto()\n EV_REPR = auto()\n SKIP_VIZ = auto()"
}
] | from typing import Dict, Optional, Tuple
from torch import compile as th_compile
from ...yolox.models.network_blocks import BaseConv, CSPLayer, DWConv
from data.utils.types import BackboneFeatures
import torch as th
import torch.nn as nn | 1,394 | """
Original Yolox PAFPN code with slight modifications
"""
try:
from torch import compile as th_compile  # restored: the extraction hoisted this import into the block above, leaving the try body empty
except ImportError:
th_compile = None
class YOLOPAFPN(nn.Module):
"""
Removed the direct dependency on the backbone.
"""
def __init__(
self,
depth: float = 1.0,
in_stages: Tuple[int, ...] = (2, 3, 4),
in_channels: Tuple[int, ...] = (256, 512, 1024),
depthwise: bool = False,
act: str = "silu",
compile_cfg: Optional[Dict] = None,
):
super().__init__()
assert len(in_stages) == len(in_channels)
assert len(in_channels) == 3, 'Current implementation only for 3 feature maps'
self.in_features = in_stages
self.in_channels = in_channels
Conv = DWConv if depthwise else BaseConv
###### Compile if requested ######
if compile_cfg is not None:
compile_mdl = compile_cfg['enable']
if compile_mdl and th_compile is not None:
self.forward = th_compile(self.forward, **compile_cfg['args'])
elif compile_mdl:
print('Could not compile PAFPN because torch.compile is not available')
##################################
self.upsample = lambda x: nn.functional.interpolate(x, scale_factor=2, mode='nearest-exact')
self.lateral_conv0 = BaseConv(
in_channels[2], in_channels[1], 1, 1, act=act
)
| """
Original Yolox PAFPN code with slight modifications
"""
try:
from torch import compile as th_compile  # restored: the extraction hoisted this import into the block above, leaving the try body empty
except ImportError:
th_compile = None
class YOLOPAFPN(nn.Module):
"""
Removed the direct dependency on the backbone.
"""
def __init__(
self,
depth: float = 1.0,
in_stages: Tuple[int, ...] = (2, 3, 4),
in_channels: Tuple[int, ...] = (256, 512, 1024),
depthwise: bool = False,
act: str = "silu",
compile_cfg: Optional[Dict] = None,
):
super().__init__()
assert len(in_stages) == len(in_channels)
assert len(in_channels) == 3, 'Current implementation only for 3 feature maps'
self.in_features = in_stages
self.in_channels = in_channels
Conv = DWConv if depthwise else BaseConv
###### Compile if requested ######
if compile_cfg is not None:
compile_mdl = compile_cfg['enable']
if compile_mdl and th_compile is not None:
self.forward = th_compile(self.forward, **compile_cfg['args'])
elif compile_mdl:
print('Could not compile PAFPN because torch.compile is not available')
##################################
self.upsample = lambda x: nn.functional.interpolate(x, scale_factor=2, mode='nearest-exact')
self.lateral_conv0 = BaseConv(
in_channels[2], in_channels[1], 1, 1, act=act
) | self.C3_p4 = CSPLayer( | 1 | 2023-12-29 04:04:34+00:00 | 2k |
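The gold next_line opens a CSPLayer. The continuation below follows the upstream YOLOX PAFPN wiring, which this file tracks closely; treat it as an assumption rather than this repo's exact code:

self.C3_p4 = CSPLayer(          # gold next_line
    int(2 * in_channels[1]),
    in_channels[1],
    round(3 * depth),
    False,
    depthwise=depthwise,
    act=act,
)  # fuses the upsampled stride-32 map with the stride-16 lateral
self.reduce_conv1 = BaseConv(in_channels[1], in_channels[0], 1, 1, act=act)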
yeyingdege/ctr-din-pytorch | din/model.py | [
{
"identifier": "EmbeddingLayer",
"path": "din/embedding.py",
"snippet": "class EmbeddingLayer(nn.Module):\n def __init__(self, num_emb, embedding_dim):\n super(EmbeddingLayer, self).__init__()\n\n self.embeddings = nn.Embedding(num_emb, embedding_dim)\n nn.init.xavier_uniform_(self.embeddings.weight)\n\n def forward(self, batch_cat):\n batch_embedding = self.embeddings(batch_cat)\n return batch_embedding"
},
{
"identifier": "FCLayer",
"path": "din/fc.py",
"snippet": "class FCLayer(nn.Module):\n def __init__(self, input_size, \n hidden_size, \n bias, \n batch_norm=False,\n dropout_rate=0., \n activation='relu', \n use_sigmoid=False, \n dice_dim=2):\n super(FCLayer, self).__init__()\n\n self.use_sigmoid = use_sigmoid\n\n layers = []\n if batch_norm:\n layers.append(nn.BatchNorm1d(input_size))\n \n # FC -> activation -> dropout\n layers.append(nn.Linear(input_size, hidden_size, bias=bias))\n if activation.lower() == 'relu':\n layers.append(nn.ReLU(inplace=True))\n elif activation.lower() == 'dice':\n assert dice_dim\n layers.append(Dice(hidden_size, dim=dice_dim))\n elif activation.lower() == 'prelu':\n layers.append(nn.PReLU())\n else: # None\n pass\n layers.append(nn.Dropout(p=dropout_rate))\n\n self.fc = nn.Sequential(*layers)\n if self.use_sigmoid:\n self.output_layer = nn.Sigmoid()\n \n # weight initialization xavier_normal (or glorot_normal in keras, tf)\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_normal_(m.weight, gain=1.0)\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n pass\n\n\n def forward(self, x):\n return self.output_layer(self.fc(x)) if self.use_sigmoid else self.fc(x)"
},
{
"identifier": "DinAttentionLayer",
"path": "din/attention.py",
"snippet": "class DinAttentionLayer(nn.Module):\n def __init__(self, embedding_dim=36):\n super(DinAttentionLayer, self).__init__()\n\n self.local_att = LocalActivationUnit(hidden_size=[80, 40, 1], \n bias=[True, True, True], \n embedding_dim=embedding_dim, \n batch_norm=False)\n\n \n def forward(self, query_ad, user_behavior, user_behavior_length):\n # query ad : batch_size * embedding_size\n # user behavior : batch_size * time_seq_len * embedding_size\n # user behavior length: batch_size * time_seq_len\n # output : batch_size * 1 * embedding_size\n \n attention_score = self.local_att(query_ad, user_behavior) # [128, 100, 1]\n attention_score = torch.transpose(attention_score, 1, 2) # B * 1 * T\n \n # define mask by length\n user_behavior_length = user_behavior_length.type(torch.LongTensor)\n mask = torch.arange(user_behavior.size(1))[None, :] < user_behavior_length[:, None]\n \n # mask\n score = torch.mul(attention_score, mask.type(torch.cuda.FloatTensor)) # batch_size *\n score = F.softmax(score, dim=-1)\n\n # multiply weight\n output = torch.matmul(score, user_behavior)\n\n return output"
}
] | import torch
import torch.nn as nn
from torch.nn import functional as F
from .embedding import EmbeddingLayer
from .fc import FCLayer
from .attention import DinAttentionLayer | 1,019 |
class DeepInterestNetwork(nn.Module):
def __init__(self, n_uid, n_mid, n_cat, EMBEDDING_DIM, HIDDEN_DIM=[162,200,80,2]):
super(DeepInterestNetwork, self).__init__()
self.embedding_dim = EMBEDDING_DIM
self.hid_dim = HIDDEN_DIM
# embeddings
self.uid_embeddings = EmbeddingLayer(n_uid, self.embedding_dim)
self.mid_embeddings = EmbeddingLayer(n_mid, self.embedding_dim)
self.cat_embeddings = EmbeddingLayer(n_cat, self.embedding_dim)
self.attn = DinAttentionLayer(embedding_dim=self.embedding_dim*2)
mlp_input_dim = self.embedding_dim * 9
self.mlp = nn.Sequential(
|
class DeepInterestNetwork(nn.Module):
def __init__(self, n_uid, n_mid, n_cat, EMBEDDING_DIM, HIDDEN_DIM=[162,200,80,2]):
super(DeepInterestNetwork, self).__init__()
self.embedding_dim = EMBEDDING_DIM
self.hid_dim = HIDDEN_DIM
# embeddings
self.uid_embeddings = EmbeddingLayer(n_uid, self.embedding_dim)
self.mid_embeddings = EmbeddingLayer(n_mid, self.embedding_dim)
self.cat_embeddings = EmbeddingLayer(n_cat, self.embedding_dim)
self.attn = DinAttentionLayer(embedding_dim=self.embedding_dim*2)
mlp_input_dim = self.embedding_dim * 9
self.mlp = nn.Sequential( | FCLayer(mlp_input_dim, hidden_size=self.hid_dim[1], bias=True, batch_norm=True, activation='dice'), | 1 | 2023-12-27 05:53:50+00:00 | 2k |
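The gold next_line is the first FCLayer of the MLP tower. A sketch of the remaining layers, inferred from HIDDEN_DIM=[162, 200, 80, 2] and the FCLayer signature in the snippet above; the activation and normalization choices past the first layer are assumptions:

self.mlp = nn.Sequential(
    FCLayer(mlp_input_dim, hidden_size=self.hid_dim[1], bias=True,
            batch_norm=True, activation='dice'),                    # gold next_line
    FCLayer(self.hid_dim[1], hidden_size=self.hid_dim[2], bias=True,
            activation='dice'),
    FCLayer(self.hid_dim[2], hidden_size=self.hid_dim[3], bias=True,
            activation='none'),  # 'none' falls through FCLayer's activation branches
)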
iamlooper/VIC-TG-Bot | app/core/client/filters.py | [
{
"identifier": "Config",
"path": "app/config.py",
"snippet": "class _Config:\n class CMD:\n def __init__(self, func, path, doc):\n def __init__(self):\n def __str__(self):"
},
{
"identifier": "Conversation",
"path": "app/core/client/conversation.py",
"snippet": "class Conversation:\n CONVO_DICT: dict[int, \"Conversation\"] = {}\n\n class DuplicateConvo(Exception):\n def __init__(self, chat: str | int):\n super().__init__(f\"Conversation already started with {chat} \")\n\n def __init__(\n self, chat_id: int | str, filters: Filter | None = None, timeout: int = 10\n ):\n self.chat_id = chat_id\n self.filters = filters\n self.timeout = timeout\n self.responses: list = []\n self.set_future()\n from app import bot\n\n self._client = bot\n\n def __str__(self):\n return json.dumps(self.__dict__, indent=4, ensure_ascii=False, default=str)\n\n def set_future(self, *args, **kwargs):\n future = asyncio.Future()\n future.add_done_callback(self.set_future)\n self.response = future\n\n async def get_response(self, timeout: int | None = None) -> Message | None:\n try:\n resp_future: asyncio.Future = await asyncio.wait_for(\n self.response, timeout=timeout or self.timeout\n )\n return resp_future\n except asyncio.TimeoutError:\n raise TimeoutError(\"Conversation Timeout\")\n\n async def send_message(\n self,\n text: str,\n timeout=0,\n get_response=False,\n **kwargs,\n ) -> Message | tuple[Message, Message]:\n message = await self._client.send_message(\n chat_id=self.chat_id, text=text, **kwargs\n )\n if get_response:\n response = await self.get_response(timeout=timeout or self.timeout)\n return message, response\n return message\n\n async def send_document(\n self,\n document,\n caption=\"\",\n timeout=0,\n get_response=False,\n **kwargs,\n ) -> Message | tuple[Message, Message]:\n message = await self._client.send_document(\n chat_id=self.chat_id,\n document=document,\n caption=caption,\n force_document=True,\n **kwargs,\n )\n if get_response:\n response = await self.get_response(timeout=timeout or self.timeout)\n return message, response\n return message\n\n async def __aenter__(self) -> \"Conversation\":\n if isinstance(self.chat_id, str):\n self.chat_id = (await self._client.get_chat(self.chat_id)).id\n if (\n self.chat_id in Conversation.CONVO_DICT.keys()\n and Conversation.CONVO_DICT[self.chat_id].filters == self.filters\n ):\n raise self.DuplicateConvo(self.chat_id)\n Conversation.CONVO_DICT[self.chat_id] = self\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n Conversation.CONVO_DICT.pop(self.chat_id, None)\n if not self.response.done():\n self.response.cancel()"
}
] | from pyrogram import filters as _filters
from pyrogram.types import Message
from app import Config
from app.core.client.conversation import Conversation | 867 |
# Overall BOT filters
convo_filter = _filters.create(
lambda _, __, message: (message.chat.id in Conversation.CONVO_DICT.keys())
and (not message.reactions)
)
def cmd_check(message: Message, trigger: str) -> bool:
start_str = message.text.split(maxsplit=1)[0]
cmd = start_str.replace(trigger, "", 1)
|
# Overall BOT filters
convo_filter = _filters.create(
lambda _, __, message: (message.chat.id in Conversation.CONVO_DICT.keys())
and (not message.reactions)
)
def cmd_check(message: Message, trigger: str) -> bool:
start_str = message.text.split(maxsplit=1)[0]
cmd = start_str.replace(trigger, "", 1) | return bool(cmd in Config.CMD_DICT.keys()) | 0 | 2023-12-24 05:00:58+00:00 | 2k |
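The gold next_line finishes cmd_check. A sketch of how it likely feeds a pyrogram filter, mirroring convo_filter above; Config.TRIGGER is a hypothetical name for the command prefix and does not appear in this record:

def cmd_check(message: Message, trigger: str) -> bool:
    start_str = message.text.split(maxsplit=1)[0]
    cmd = start_str.replace(trigger, "", 1)
    return bool(cmd in Config.CMD_DICT.keys())  # gold next_line

cmd_filter = _filters.create(
    lambda _, __, message: bool(message.text)
    and message.text.startswith(Config.TRIGGER)  # hypothetical trigger constant
    and cmd_check(message, Config.TRIGGER)
)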
Enthusiasm23/primkit | src/primkit/utils/LoggerSetup.py | [
{
"identifier": "LOG_LEVEL",
"path": "src/primkit/config.py",
"snippet": "LOG_LEVEL = os.environ.get('LOG_LEVEL', 'INFO') # 日志级别"
},
{
"identifier": "LOG_FILE",
"path": "src/primkit/config.py",
"snippet": "LOG_FILE = os.environ.get('LOG_FILE', None) # 日志文件路径"
},
{
"identifier": "LOG_FORMAT",
"path": "src/primkit/config.py",
"snippet": "LOG_FORMAT = os.environ.get('LOG_FORMAT', '%(asctime)s - %(name)s - %(levelname)s - %(message)s') # 日志格式"
},
{
"identifier": "LOG_FILE_MODE",
"path": "src/primkit/config.py",
"snippet": "LOG_FILE_MODE = os.environ.get('LOG_FILE_MODE', 'a') # 日志文件模式"
},
{
"identifier": "MAX_LOG_SIZE",
"path": "src/primkit/config.py",
"snippet": "MAX_LOG_SIZE = int(os.environ.get('MAX_LOG_SIZE', 10485760)) # 最大日志文件大小(10MB)"
},
{
"identifier": "BACKUP_COUNT",
"path": "src/primkit/config.py",
"snippet": "BACKUP_COUNT = int(os.environ.get('BACKUP_COUNT', 3)) # 保留的日志文件数量"
},
{
"identifier": "LOG_STREAM",
"path": "src/primkit/config.py",
"snippet": "LOG_STREAM = os.environ.get('LOG_STREAM', 'True').lower() in ('true', '1', 't') # 是否输出日志到控制台"
}
] | import logging
import logging.handlers
from ..config import LOG_LEVEL, LOG_FILE, LOG_FORMAT, \
LOG_FILE_MODE, MAX_LOG_SIZE, BACKUP_COUNT, LOG_STREAM | 741 |
def setup_logging(
level=None,
log_file=None,
format=None,
log_file_mode=None,
max_log_size=None,
backup_count=None,
stream=None
):
"""
Configure logging for the application.
:param level: The logging level, e.g., 'DEBUG', 'INFO', 'WARNING'. Defaults to value from config.py but can be overridden by user input.
:param log_file: Path to the log file. If specified, logs will be written to the file. Defaults to value from config.py but can be overridden by user input.
:param format: The format for the logging messages. Defaults to value from config.py but can be overridden by user input.
:param log_file_mode: The mode for writing to the log file, e.g., 'a' for append mode. Defaults to value from config.py but can be overridden by user input.
:param max_log_size: The maximum size of the log file in bytes. When exceeded, the log will rotate. Defaults to value from config.py but can be overridden by user input.
:param backup_count: The number of backup log files to keep. Defaults to value from config.py but can be overridden by user input.
:param stream: Whether to output logs to the console. Defaults to value from config.py but can be overridden by user input.
The function uses the default configuration or configuration provided by the user. Logging can be directed to a file, console, or both based on parameters.
"""
# Use the default configuration or user-provided configuration
if level is not None:
if isinstance(level, int):
log_level = level
else:
log_level = getattr(logging, level.upper(), logging.INFO)
else:
if isinstance(LOG_LEVEL, int):
log_level = LOG_LEVEL
else:
log_level = getattr(logging, LOG_LEVEL.upper(), logging.INFO)
log_file = log_file if log_file is not None else LOG_FILE
format = format if format is not None else LOG_FORMAT
|
def setup_logging(
level=None,
log_file=None,
format=None,
log_file_mode=None,
max_log_size=None,
backup_count=None,
stream=None
):
"""
Configure logging for the application.
:param level: The logging level, e.g., 'DEBUG', 'INFO', 'WARNING'. Defaults to value from config.py but can be overridden by user input.
:param log_file: Path to the log file. If specified, logs will be written to the file. Defaults to value from config.py but can be overridden by user input.
:param format: The format for the logging messages. Defaults to value from config.py but can be overridden by user input.
:param log_file_mode: The mode for writing to the log file, e.g., 'a' for append mode. Defaults to value from config.py but can be overridden by user input.
:param max_log_size: The maximum size of the log file in bytes. When exceeded, the log will rotate. Defaults to value from config.py but can be overridden by user input.
:param backup_count: The number of backup log files to keep. Defaults to value from config.py but can be overridden by user input.
:param stream: Whether to output logs to the console. Defaults to value from config.py but can be overridden by user input.
The function uses the default configuration or configuration provided by the user. Logging can be directed to a file, console, or both based on parameters.
"""
# Use the default configuration or user-provided configuration
if level is not None:
if isinstance(level, int):
log_level = level
else:
log_level = getattr(logging, level.upper(), logging.INFO)
else:
if isinstance(LOG_LEVEL, int):
log_level = LOG_LEVEL
else:
log_level = getattr(logging, LOG_LEVEL.upper(), logging.INFO)
log_file = log_file if log_file is not None else LOG_FILE
format = format if format is not None else LOG_FORMAT | log_file_mode = log_file_mode if log_file_mode is not None else LOG_FILE_MODE | 3 | 2023-12-25 14:12:46+00:00 | 2k |
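The gold next_line continues the chain of config fallbacks. A sketch of how the rest of setup_logging could look using only the standard library; the handler wiring mirrors the parameters documented in the docstring, but is an assumption about this repo's exact code:

log_file_mode = log_file_mode if log_file_mode is not None else LOG_FILE_MODE  # gold next_line
max_log_size = max_log_size if max_log_size is not None else MAX_LOG_SIZE
backup_count = backup_count if backup_count is not None else BACKUP_COUNT
stream = stream if stream is not None else LOG_STREAM

handlers = []
if log_file:
    # Rotate the file once it exceeds max_log_size, keeping backup_count old files.
    handlers.append(logging.handlers.RotatingFileHandler(
        log_file, mode=log_file_mode, maxBytes=max_log_size, backupCount=backup_count))
if stream:
    handlers.append(logging.StreamHandler())
logging.basicConfig(level=log_level, format=format, handlers=handlers)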
Wangyuhao06/2022-adhoc | src/env.py | [
{
"identifier": "random_waypoint",
"path": "pymobility/models/mobility.py",
"snippet": "def random_waypoint(*args, **kwargs):\n return iter(RandomWaypoint(*args, **kwargs))"
},
{
"identifier": "Node",
"path": "src/node.py",
"snippet": "class Node(object):\n def __init__(self,id_node):\n super(Node, self).__init__()\n #multi-agent sys setting\n self.node_max=36\n self.act_range=self.node_max-1 #最大邻居范围\n # current agent-property setting\n self.id=id_node#该节点id\n # 1 - packets\n self.packets_ToSend_id=[]#该节点当前待传的包\n self.packets_id_list=[]#该节点至今为止保存过的包id\n \n self.sending_flag=0\n self.rec_flag=0\n \n self.trans_task_send=Queue(maxsize=1)#该节点当前传输的任务\n self.trans_taskID_rec=[]#该节点当前接收的任务\n # 2 - energy\n self.current_amp_send=0#节点当前发送增益--------动作\n #self.current_amp_receive=0#节点当前接收增益--------动作\n \n self.current_power_send=0#节点当前发送功率\n self.current_power_receive=0#节点当前接收功率\n self.power_list=[]#节点使用能量记录\n \n self.energy_consumption=0#截至现在能量消耗\n # 3 - freq\n self.current_freqB=[1]#当前选用频谱块--------动作\n self.freqB_list=[1]#频谱块历史\n # 4 - topology\n self.neibor_idlist=[]\n self.next_hop_id=-1#下一条节点id--------动作\n # 5 - observation\n #self.ob_send=[]\n \n # def observation_rec(self,send_node):\n # if len(self.ob_send)==0 or len(send_node.ob_send)==0 :\n # raise ValueError(\"send observation unfinished\")\n # self.ob_rec.append(self.ob_send[-1])\n # self.ob_rec.append(send_node.ob_send[-1])\n # return self.ob_rec\n \n \n def get_send_action(self,ob,action_space):\n \n ###缺省决策###\n \n #改变属性\n return self.current_amp_send,self.current_freqB,self.next_hop_id\n \n def get_rec_action(self,ob):\n \n ###缺省决策###\n \n #改变属性\n return self.current_amp_receive "
},
{
"identifier": "Packet",
"path": "src/packet.py",
"snippet": "class Packet(object):\n def __init__(self,id_packet,packet_size,ori_node_id,dst_node_id,time_start_0):\n super(Packet, self).__init__()\n self.id=id_packet\n self.size=packet_size\n #节点属性\n self.ori_node_id=ori_node_id\n self.cur_node_id=ori_node_id\n self.dst_node_id=dst_node_id\n self.node_list=[ori_node_id]\n #T-T属性\n self.cur_trans_task_id=-100\n self.in_TR=0\n self.trans_task_IDlist=[]\n #路由属性\n self.time_start=time_start_0\n self.time_use=0\n self.arrive_flag=0\n \n def packet_trans_update(self,trans_task):\n if trans_task.trans_property[2]!=self.id:\n raise ValueError('trans_task not matched')\n self.cur_trans_task_id=trans_task.id"
},
{
"identifier": "Trans_task",
"path": "src/transtask.py",
"snippet": "class Trans_task(object):\n def __init__(self,trans_id,node_send,node_rec,packet):\n self.id=trans_id\n self.trans_property=(node_send.id,node_rec.id,packet.id)#基本属性\n self.packsize=packet.size\n ####frequency block info####\n self.FreqB_occup=node_send.current_freqB #占用频谱块id\n ####SINR and Capacity####\n self.SNR_C=([],1)#Y(SNR,Capacity)-----------------[X(timeslot1:SNR,Capacity),(timeslot2:SNR,Capacity),...]\n ####time of trans####\n self.time_use=1#int(self.packsize/self.SNR_C[1])+1\n self.time_cnt=0\n self.finish_flag=0\n ####energy setting####\n self.energy_property = (node_send.current_amp_send,RECAMP)\n self.energy_consume=(node_send.current_amp_send*packet.size*PACKENERGY,RECAMP*packet.size*PACKENERGY)\n self.power_consume=(round(node_send.current_amp_send*packet.size*PACKENERGY/self.time_use,6),round(RECAMP*packet.size*PACKENERGY/self.time_use,6))\n \n def show_info(self):\n return self.trans_property[0],self.trans_property[1],self.trans_property[2]\n \n def Trans_task_update(self):\n if self.finish_flag:\n return 1\n if self.time_cnt>=self.time_use:\n self.finish_flag=1\n return 1\n elif self.time_cnt<self.time_use:\n self.time_cnt+=1\n return 0\n \n \n #trans_task=tuple([],{},(node_send_id,node_send_amp,node_rec_id,node_rec_amp,packet_id),0)\n #tuple:([占用频谱块id],{(timeslot1:SNR,Capacity),(timeslot2:SNR,Capacity),...},(基本属性:发送节点id,发送增益,接收节点id,接收增益,包id),完成标志位)"
}
] | import random
import numpy as np
from math import log2, log10
from queue import Queue
from pymobility.models.mobility import random_waypoint
from src.node import Node
from src.packet import Packet
from src.parameter import *
from src.transtask import Trans_task | 1,479 |
class Environment():
# Initialize the environment
def __init__(self):
# Initial data - maximum number of nodes
self.node_max=NODE_MAX
self.node_space_size=NODE_MAX
self.node_moving_area=MOV_AREA
# Initialize the 2D plane
|
class Environment():
# Initialize the environment
def __init__(self):
# Initial data - maximum number of nodes
self.node_max=NODE_MAX
self.node_space_size=NODE_MAX
self.node_moving_area=MOV_AREA
# Initialize the 2D plane | self.geo_area = random_waypoint(self.node_max, dimensions=(MOV_AREA, MOV_AREA), velocity=(10, 15), wt_max=1.0) | 0 | 2023-12-30 09:35:30+00:00 | 2k
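The gold next_line creates the mobility generator. Per the random_waypoint snippet above it returns an iterator; a short usage sketch (the yielded array shape is an assumption from the pymobility API):

self.geo_area = random_waypoint(self.node_max, dimensions=(MOV_AREA, MOV_AREA),
                                velocity=(10, 15), wt_max=1.0)  # gold next_line
positions = next(self.geo_area)  # one simulation step: (node_max, 2) node coordinates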
karthicksivakumarp/gui_read_csv | main.py | [
{
"identifier": "read_csv_file",
"path": "read_from_csv/read_csv_file.py",
"snippet": "class read_csv_data:\r\n def __init__(self):\r\n def read_mult_csv_file(self):\r"
},
{
"identifier": "analyze_data",
"path": "data_analysis/analyze_data.py",
"snippet": "class analyze_csv_data:\n def __init__(self):\n def pass_data_frame(self, df_list, csv_filepaths, columns):\n def analyze_data_all(self):\n def analyze_data_single_csv(self, index):"
},
{
"identifier": "generate_report",
"path": "report_generation/generate_report.py",
"snippet": "class generate_report:\r\n def __init__(self):\r\n \"\"\"\r\n Constructor for the generate_report class.\r\n Initializes instance variables to store analysis data.\r\n Customize this file for your needs to generate report\r\n \"\"\"\r\n # Initialize instance variables to store analysis data\r\n self.analysis_data_1 = None\r\n self.analysis_data_2 = None\r\n self.analysis_data_3 = None\r\n\r\n def generate_report(self, data1, data2, data3):\r\n \"\"\"\r\n Method to generate a report by assigning analysis data to instance variables.\r\n\r\n Parameters:\r\n - data1: The first set of analysis data.\r\n - data2: The second set of analysis data.\r\n - data3: The third set of analysis data.\r\n \"\"\"\r\n # Assign data1 to analysis_data_1\r\n self.analysis_data_1 = data1\r\n # Assign data2 to analysis_data_2\r\n self.analysis_data_2 = data2\r\n # Assign data3 to analysis_data_3\r\n self.analysis_data_3 = data3\r\n\r\n # Print analysis_data_1\r\n print(\"Analysis Data 1:\")\r\n print(self.analysis_data_1)\r\n\r\n # Print analysis_data_2\r\n print(\"Analysis Data 2:\")\r\n print(self.analysis_data_2)\r\n\r\n # Print analysis_data_3\r\n print(\"Analysis Data 3:\")\r\n print(self.analysis_data_3)\r"
},
{
"identifier": "gui",
"path": "user_interface/gui.py",
"snippet": "class UI(Frame):\r\n def __init__(self, root, ui_read_csv, ui_data_analysis, ui_report_gen):\r\n def set_status_message(self, message):\r\n def init_menu_bar(self):\r\n def config_frame(self):\r\n def top_left_frame(self):\r\n def bottom_left_frame(self):\r\n def right_frame(self):\r\n def read_csv_files(self):\r\n def on_listbox_select(self, event):\r\n def analyze_csv_files(self):\r\n def analyze_all_csv_files(self):\r\n def generate_report_single(self):\r\n def generate_report_all(self):\r"
}
] | from read_from_csv import read_csv_file
from data_analysis import analyze_data
from report_generation import generate_report
from tkinter import Tk
from user_interface import gui
| 800 | # Import necessary modules
# Initialize CSV reader instance
read_csv = read_csv_file.read_csv_data()
# Obtain the function/method for reading multiple CSV files
# Note: "read_mult_csv_file" is a function or method defined in the "read_csv_file" module
main_read_csv = read_csv.read_mult_csv_file
# Initialize data analyzer instance
analyze_data = analyze_data.analyze_csv_data()
# Initialize report generator instance
report_gen = generate_report.generate_report()
# Create the main Tkinter window
root = Tk()
root.title('Csv DataAnalyzer') # Set the title of the Tkinter window
root.geometry("800x600") # Set the initial dimensions of the Tkinter window
# Create the user interface (GUI) using the UI class from the "user_interface" module
# Pass the necessary components (main_read_csv, analyze_data, report_gen) to the GUI
| # Import necessary modules
# Initialize CSV reader instance
read_csv = read_csv_file.read_csv_data()
# Obtain the function/method for reading multiple CSV files
# Note: "read_mult_csv_file" is a function or method defined in the "read_csv_file" module
main_read_csv = read_csv.read_mult_csv_file
# Initialize data analyzer instance
analyze_data = analyze_data.analyze_csv_data()
# Initialize report generator instance
report_gen = generate_report.generate_report()
# Create the main Tkinter window
root = Tk()
root.title('Csv DataAnalyzer') # Set the title of the Tkinter window
root.geometry("800x600") # Set the initial dimensions of the Tkinter window
# Create the user interface (GUI) using the UI class from the "user_interface" module
# Pass the necessary components (main_read_csv, analyze_data, report_gen) to the GUI
| gui.UI(root, main_read_csv, analyze_data, report_gen)
| 3 | 2023-12-25 18:49:42+00:00 | 2k |
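The gold next_line constructs the UI. A sketch of the likely tail of main.py; starting the Tk event loop is an assumption, though nothing renders without it:

gui.UI(root, main_read_csv, analyze_data, report_gen)  # gold next_line
root.mainloop()  # assumed: hand control to the Tkinter event loop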
Slenderman00/Ask-Surf | AskSurf/cli.py | [
{
"identifier": "load_settings",
"path": "AskSurf/settings.py",
"snippet": "def load_settings():\n # check if settings.toml exists\n if not settings_exist():\n create_settings()\n edit_settings()\n return load_settings()\n\n with open(own_dir / \"settings.toml\", \"r\") as f:\n settings = toml.load(f)\n\n return settings"
},
{
"identifier": "settings_exist",
"path": "AskSurf/settings.py",
"snippet": "def settings_exist():\n return (own_dir / \"settings.toml\").exists()"
},
{
"identifier": "edit_settings",
"path": "AskSurf/settings.py",
"snippet": "def edit_settings():\n os.system(f\"{select_editor()} {own_dir / 'settings.toml'}\")"
}
] | import os
import requests
import argparse
import tqdm
import time
import subprocess
import sys
from pathlib import Path
from halo import Halo
from .settings import load_settings, settings_exist, edit_settings | 795 |
settings = {}
own_dir = Path(__file__).parent.absolute()
question_pipe = own_dir / "question_pipe"
response_pipe = own_dir / "response_pipe"
def conditional_decorator(dec, condition):
def decorator(func):
if not condition:
# Return the function unchanged, not decorated.
return func
return dec(func)
return decorator
def parse_message(message):
# replace the tags with the correct color codes
message = message.replace("[RED]", "\033[31m")
message = message.replace("[YELLOW]", "\033[33m")
message = message.replace("[ORANGE]", "\033[33m")
message = message.replace("[GREEN]", "\033[32m")
message = message.replace("[PURPLE]", "\033[35m")
message = message.replace("[BLUE]", "\033[34m")
message = message.replace("[NORMAL]", "\033[0m")
# replace all end tags with the normal color code
message = message.replace("[/RED]", "\033[0m")
message = message.replace("[/YELLOW]", "\033[0m")
message = message.replace("[/ORANGE]", "\033[0m")
message = message.replace("[/GREEN]", "\033[0m")
message = message.replace("[/PURPLE]", "\033[0m")
message = message.replace("[/BLUE]", "\033[0m")
message = message.replace("[/NORMAL]", "\033[0m")
return message
def init():
if not model_exists():
print("Please select a model")
download_model(select_model())
if not settings_exist():
print("Please make sure the settings are correct")
settings = load_settings()
exit(1)
def main():
"""Main entry point for the application"""
init()
# parse the arguments
parser = argparse.ArgumentParser(description="AskSurf CLI")
parser.add_argument(
"question",
nargs=argparse.REMAINDER,
help="The question to ask Dolphin",
)
parser.add_argument(
"--model",
"-m",
action="store_true",
help="The model to use",
)
parser.add_argument(
"--delete",
"-d",
action="store_true",
help="Delete the model",
)
parser.add_argument(
"--kill",
"-k",
action="store_true",
help="Kill the Dolphin service",
)
parser.add_argument(
"--settings",
"-s",
action="store_true",
help="Edit the settings",
)
args = parser.parse_args()
if args.model:
download_model(select_model())
return
if args.delete:
delete_model()
return
if args.kill:
os.system("pkill -f dolphin_service.py")
return
    if args.settings: | edit_settings() | 2 | 2023-12-22 19:43:45+00:00 | 2k |
davidsvy/fractal_video | src/prepare_data/diving48.py | [
{
"identifier": "dataset_stats",
"path": "src/utils/data.py",
"snippet": "def dataset_stats(root, ext):\n n_train = len(find_files(dir=os.path.join(root, 'train'), ext=ext))\n n_val = len(find_files(dir=os.path.join(root, 'val'), ext=ext))\n n_test = len(find_files(dir=os.path.join(root, 'test'), ext=ext))\n print(f'train -> {n_train} files')\n print(f'val -> {n_val} files')\n print(f'test -> {n_test} files')"
},
{
"identifier": "run_bash",
"path": "src/utils/other.py",
"snippet": "def run_bash(command):\n return subprocess.run(command, shell=True, capture_output=True, text=True)"
}
] | import json
import os
import shutil
from ..utils.data import dataset_stats
from ..utils.other import run_bash | 685 |
def move_files(path_split, dir_src, dir_tgt, ext):
with open(path_split, 'r') as file:
lut = json.load(file)
for item in lut:
filename = f'{item["vid_name"]}.{ext}'
path_src = os.path.join(dir_src, filename)
label = str(item['label'])
dir_label = os.path.join(dir_tgt, label)
path_tgt = os.path.join(dir_label, filename)
os.makedirs(dir_label, exist_ok=True)
shutil.move(path_src, path_tgt)
def diving48(root):
"""
train -> 15943 files
val -> 2096 files
"""
url_data = 'http://www.svcl.ucsd.edu/projects/resound/Diving48_rgb.tar.gz'
url_split_train = 'http://www.svcl.ucsd.edu/projects/resound/Diving48_train.json'
url_split_val = 'http://www.svcl.ucsd.edu/projects/resound/Diving48_test.json'
path_data = os.path.join(root, os.path.basename(url_data))
path_split_train = os.path.join(root, os.path.basename(url_split_train))
path_split_val = os.path.join(root, os.path.basename(url_split_val))
dir_src = os.path.join(root, 'rgb')
dir_train = os.path.join(root, 'train')
dir_val = os.path.join(root, 'val')
ext = 'mp4'
os.makedirs(dir_train, exist_ok=True)
os.makedirs(dir_val, exist_ok=True)
print('\nDownloading DIVING48...')
run_bash(f'wget {url_split_train} -P {root}')
run_bash(f'wget {url_split_val} -P {root}')
run_bash(f'wget {url_data} -P {root}')
print('Extracting DIVING48...')
run_bash(f'tar -xf {path_data} -C {root}')
os.remove(path_data)
move_files(
path_split=path_split_train, dir_src=dir_src,
dir_tgt=dir_train, ext=ext
)
move_files(
path_split=path_split_val, dir_src=dir_src,
dir_tgt=dir_val, ext=ext
)
shutil.rmtree(dir_src)
os.remove(path_split_train)
    os.remove(path_split_val)
 | dataset_stats(root=root, ext=ext) | 0 | 2023-12-27 19:43:45+00:00 | 2k |
OpenBrickProtocolFoundation/client | main.py | [
{
"identifier": "Event",
"path": "tetrion.py",
"snippet": "class Event(NamedTuple):\n key: Key\n type: EventType\n frame: int"
},
{
"identifier": "EventType",
"path": "tetrion.py",
"snippet": "class EventType(Enum):\n PRESSED = 0\n RELEASED = 1"
},
{
"identifier": "Key",
"path": "tetrion.py",
"snippet": "class Key(Enum):\n LEFT = 0\n RIGHT = 1\n DROP = 2"
},
{
"identifier": "Tetrion",
"path": "tetrion.py",
"snippet": "class Tetrion:\n def __init__(self) -> None:\n self._tetrion = _create_tetrion()\n\n def try_get_active_tetromino(self) -> Optional[Tetromino]:\n return _tetrion_try_get_active_tetromino(self._tetrion)\n\n def simulate_up_until(self, frame: int) -> None:\n _tetrion_simulate_up_until(self._tetrion, frame)\n\n def enqueue_event(self, event: Event) -> None:\n _tetrion_enqueue_event(self._tetrion, event)\n\n def matrix(self) -> Matrix:\n matrix = _tetrion_matrix(self._tetrion)\n minos: list[TetrominoType] = []\n for y in range(self.height):\n for x in range(self.width):\n minos.append(_matrix_get(matrix, Vec2(x, y)))\n return Matrix(minos, self.width)\n\n @cached_property\n def width(self) -> int:\n return _tetrion_width()\n\n @cached_property\n def height(self) -> int:\n return _tetrion_height()\n\n def __enter__(self) -> Self:\n return self\n\n def __exit__(self, exc_type: type[BaseException], exc_val: BaseException, exc_tb: types.TracebackType) -> bool:\n self.__del__()\n return exc_type is None\n\n def __del__(self) -> None:\n if self._tetrion is not None:\n _destroy_tetrion(self._tetrion)\n self._tetrion = None"
}
] | import pygame
from tetrion import Event
from tetrion import EventType
from tetrion import Key
from tetrion import Tetrion | 754 |
def main() -> None:
frame = 0
with Tetrion() as tetrion:
pygame.init()
RECT_SIZE = 30
size = (RECT_SIZE * tetrion.width, (RECT_SIZE + 2) * tetrion.height)
screen = pygame.display.set_mode(size)
COLORS = [(0, 0, 0),
(0, 240, 240),
(0, 0, 240),
(240, 160, 0),
(240, 240, 0),
(0, 240, 0),
(160, 0, 240),
(240, 0, 0)]
done = False
clock = pygame.time.Clock()
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
done = True
                    elif event.key == pygame.K_a: | tetrion.enqueue_event(Event(key=Key.LEFT, type=EventType.PRESSED, frame=frame)) | 2 | 2023-12-30 15:25:05+00:00 | 2k |
Birch-san/natten-fwd-ad | script/demo.py | [
{
"identifier": "NattenBlock",
"path": "src/natten_block.py",
"snippet": "class NattenBlock(Module):\n def __init__(self, d_model: int, d_head: int, kernel_size: int):\n super().__init__()\n self.d_head = d_head\n self.n_heads = d_model // d_head\n self.kernel_size = kernel_size\n self.qkv_proj = Linear(d_model, d_model * 3, bias=False)\n self.out_proj = Linear(d_model, d_model, bias=False)\n\n def forward(self, x: FloatTensor) -> FloatTensor:\n qkv = self.qkv_proj(x)\n q, k, v = rearrange(qkv, \"n h w (t nh e) -> t n nh h w e\", t=3, e=self.d_head)\n q = q / self.d_head**.5\n qk = natten2dqk(q, k, self.kernel_size, 1)\n a = qk.softmax(dim=-1)\n x = natten2dav(a, v, self.kernel_size, 1)\n x = rearrange(x, \"n nh h w e -> n h w (nh e)\")\n x = self.out_proj(x)\n return x"
},
{
"identifier": "NeighbourhoodAttnBlock",
"path": "src/hood_attn_block.py",
"snippet": "class NeighbourhoodAttnBlock(Module):\n def __init__(self, d_model: int, d_head: int, kernel_size: int):\n \"\"\"\n Pure-PyTorch implementation of neighbourhood attention.\n Uses global self-attention and a (very) complicated mask.\n Consequently it (probably) supports:\n - Mac\n - PyTorch Forward-Mode Autodiff\n - Nested tensors\n \"\"\"\n super().__init__()\n self.d_head = d_head\n self.n_heads = d_model // d_head\n self.kernel_size = kernel_size\n self.qkv_proj = Linear(d_model, d_model * 3, bias=False)\n self.out_proj = Linear(d_model, d_model, bias=False)\n\n def forward(self, x: FloatTensor) -> FloatTensor:\n _, h, w, _ = x.shape\n qkv = self.qkv_proj(x)\n q, k, v = rearrange(qkv, \"n h w (t nh e) -> t n nh (h w) e\", t=3, e=self.d_head)\n kernel_size=Dimensions(self.kernel_size, self.kernel_size)\n canvas_size=Dimensions(h, w)\n mask: BoolTensor = make_neighbourhood_mask(kernel_size, canvas_size, flatten_to_1d=True, device=x.device)\n mask = mask.unsqueeze(0).unsqueeze(0)\n x = scaled_dot_product_attention(q, k, v, attn_mask=mask)\n x = rearrange(x, \"n nh (h w) e -> n h w (nh e)\", h=h, w=w, e=self.d_head)\n x = self.out_proj(x)\n return x"
}
] | import torch
import torch.autograd.forward_ad as fwAD
from torch import inference_mode, enable_grad
from torch.backends.cuda import sdp_kernel
from src.natten_block import NattenBlock
from src.hood_attn_block import NeighbourhoodAttnBlock | 775 |
device=torch.device('cuda')
dtype=torch.bfloat16
seed=42
d_model=128
d_head=64
kernel_size=13
torch.manual_seed(seed)
|
device=torch.device('cuda')
dtype=torch.bfloat16
seed=42
d_model=128
d_head=64
kernel_size=13
torch.manual_seed(seed) | natten_block = NattenBlock(d_model, d_head=d_head, kernel_size=kernel_size).to(device=device, dtype=dtype) | 0 | 2023-12-22 22:57:36+00:00 | 2k |
ysyBrenda/Transformer-For-Geochemical-Anomaly-Detection | anomaly_detection.py | [
{
"identifier": "Transformer",
"path": "transformer/Models.py",
"snippet": "class Transformer(nn.Module):\n ''' A sequence to sequence model with attention mechanism. '''\n\n def __init__(\n self, src_pad_idx, trg_pad_idx,\n d_word_vec=38, d_model=38, d_inner=2048,\n n_layers=6, n_head=8, d_k=38, d_v=38, dropout=0.1, n_position=2000,\n ):\n\n super().__init__()\n\n self.src_pad_idx, self.trg_pad_idx = src_pad_idx, trg_pad_idx\n\n self.scale_prj = False #True\n self.d_model = d_model\n\n self.encoder = Encoder(\n n_position=n_position,\n d_word_vec=d_word_vec, d_model=d_model, d_inner=d_inner,\n n_layers=n_layers, n_head=n_head, d_k=d_k, d_v=d_v,\n pad_idx=src_pad_idx, dropout=dropout)\n\n self.decoder = Decoder(\n n_position=n_position,\n d_word_vec=d_word_vec, d_model=d_model, d_inner=d_inner,\n n_layers=n_layers, n_head=n_head, d_k=d_k, d_v=d_v,\n pad_idx=trg_pad_idx, dropout=dropout)\n\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n assert d_model == d_word_vec, \\\n 'To facilitate the residual connections, \\\n the dimensions of all module outputs shall be the same.'\n\n def forward(self, src_seq, trg_seq):\n\n src_mask=get_pad_mask(src_seq[:,:,0], self.src_pad_idx)\n trg_mask=trg_seq[:, :,0] #.unsqueeze(1)\n trg_mask = get_pad_mask(trg_mask, self.trg_pad_idx) & get_subsequent_mask(trg_mask)\n\n enc_output,enc_slf_attn_list = self.encoder(src_seq, src_mask,return_attns=True)\n dec_output, dec_slf_attn_list, dec_enc_attn_list= self.decoder(trg_seq, trg_mask, enc_output, src_mask,return_attns=True)\n\n seq_logit=dec_output\n\n return seq_logit.view(-1, seq_logit.size(2)),enc_slf_attn_list,dec_enc_attn_list"
},
{
"identifier": "Translator",
"path": "transformer/Translator.py",
"snippet": "class Translator(nn.Module):\n ''' Load a trained model and translate in beam search fashion. '''\n\n def __init__(\n self, model,src_pad_idx):\n \n\n super(Translator, self).__init__()\n\n self.src_pad_idx = src_pad_idx\n self.model = model\n self.model.eval()\n\n def _model_decode(self, trg_seq, enc_output, src_mask):\n trg_mask = get_subsequent_mask(trg_seq[:, :,0] )\n dec_output, dec_slf_attn,dec_enc_attn = self.model.decoder(trg_seq, trg_mask, enc_output, src_mask,return_attns=True)\n\n seq_logit=dec_output\n\n return seq_logit.view(-1, seq_logit.size(2)),dec_enc_attn\n\n\n def translate_sentence(self, src_seq,trg_seq):\n src_pad_idx= self.src_pad_idx\n\n with torch.no_grad():\n if len(src_seq.size())==2:\n src_seq=src_seq.unsqueeze(0)\n trg_seq=trg_seq.unsqueeze(0)\n src_mask = get_pad_mask(src_seq[:,:,0], src_pad_idx)\n enc_output, *_ = self.model.encoder(src_seq, src_mask)\n\n dec_output,dec_enc_attn = self._model_decode(trg_seq.unsqueeze(0), enc_output, src_mask)\n\n return dec_output,dec_enc_attn"
}
] | import torch
import argparse
import dill as pickle
import numpy as np
import calculate_anomalyscore
import torch.utils.data as Data
import time
from tqdm import tqdm
from transformer.Models import Transformer
from transformer.Translator import Translator | 1019 | '''
geochemical anomaly detection
1,reconstruct geochemical data with trained model.
2,then, identify geochemical anomaly
Author: ysyBrenda
'''
def load_model(opt, device):
checkpoint = torch.load(opt.model, map_location=device)
    model_opt = checkpoint['settings']
 | model = Transformer( | 0 | 2023-12-22 13:22:58+00:00 | 2k |
camenduru/MotionCtrl-hf | lvdm/modules/attention.py | [
{
"identifier": "conv_nd",
"path": "lvdm/basics.py",
"snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "normalization",
"path": "lvdm/basics.py",
"snippet": "def normalization(channels, num_groups=32):\n \"\"\"\n Make a standard normalization layer.\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNormSpecific(num_groups, channels)"
},
{
"identifier": "zero_module",
"path": "lvdm/basics.py",
"snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module"
},
{
"identifier": "checkpoint",
"path": "lvdm/common.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n try:\n return ckpt(func, *inputs)\n except:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)"
},
{
"identifier": "default",
"path": "lvdm/common.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "exists",
"path": "lvdm/common.py",
"snippet": "def exists(val):\n return val is not None"
},
{
"identifier": "init_",
"path": "lvdm/common.py",
"snippet": "def init_(tensor):\n dim = tensor.shape[-1]\n std = 1 / math.sqrt(dim)\n tensor.uniform_(-std, std)\n return tensor"
},
{
"identifier": "max_neg_value",
"path": "lvdm/common.py",
"snippet": "def max_neg_value(t):\n return -torch.finfo(t.dtype).max"
},
{
"identifier": "uniq",
"path": "lvdm/common.py",
"snippet": "def uniq(arr):\n return{el: True for el in arr}.keys()"
}
] | import math
import torch
import torch.nn.functional as F
import xformers
import xformers.ops
from functools import partial
from inspect import isfunction
from einops import rearrange, repeat
from torch import einsum, nn
from lvdm.basics import conv_nd, normalization, zero_module
from lvdm.common import checkpoint, default, exists, init_, max_neg_value, uniq | 1032 |
try:
XFORMERS_IS_AVAILBLE = True
except:
XFORMERS_IS_AVAILBLE = False
class RelativePosition(nn.Module):
""" https://github.com/evelinehong/Transformer_Relative_Position_PyTorch/blob/master/relative_position.py """
def __init__(self, num_units, max_relative_position):
super().__init__()
self.num_units = num_units
self.max_relative_position = max_relative_position
self.embeddings_table = nn.Parameter(torch.Tensor(max_relative_position * 2 + 1, num_units))
nn.init.xavier_uniform_(self.embeddings_table)
def forward(self, length_q, length_k):
device = self.embeddings_table.device
range_vec_q = torch.arange(length_q, device=device)
range_vec_k = torch.arange(length_k, device=device)
distance_mat = range_vec_k[None, :] - range_vec_q[:, None]
distance_mat_clipped = torch.clamp(distance_mat, -self.max_relative_position, self.max_relative_position)
final_mat = distance_mat_clipped + self.max_relative_position
# final_mat = th.LongTensor(final_mat).to(self.embeddings_table.device)
# final_mat = th.tensor(final_mat, device=self.embeddings_table.device, dtype=torch.long)
final_mat = final_mat.long()
embeddings = self.embeddings_table[final_mat]
return embeddings
class CrossAttention(nn.Module):
def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.,
relative_position=False, temporal_length=None):
super().__init__()
        inner_dim = dim_head * heads | context_dim = default(context_dim, query_dim) | 4 | 2023-12-27 19:32:03+00:00 | 2k |
vita-epfl/social-transmotion | evaluate_jrdb.py | [
{
"identifier": "batch_process_coords",
"path": "dataset_jrdb.py",
"snippet": "def batch_process_coords(coords, masks, padding_mask, config, modality_selection='traj+2dbox', training=False, multiperson=True):\n joints = coords.to(config[\"DEVICE\"])\n masks = masks.to(config[\"DEVICE\"])\n in_F = config[\"TRAIN\"][\"input_track_size\"]\n \n in_joints_pelvis = joints[:,:, (in_F-1):in_F, 0:1, :].clone()\n in_joints_pelvis_last = joints[:,:, (in_F-2):(in_F-1), 0:1, :].clone()\n\n joints[:,:,:,0] = joints[:,:,:,0] - joints[:,0:1, (in_F-1):in_F, 0]\n joints[:,:,:,1:] = (joints[:,:,:,1:] - joints[:,:,(in_F-1):in_F,1:])*0.25 #rescale for BB\n\n B, N, F, J, K = joints.shape\n if not training:\n if modality_selection=='traj':\n joints[:,:,:,1:]=0\n elif modality_selection=='traj+2dbox':\n pass\n else:\n print('modality error')\n exit()\n else:\n # augment JRDB traj\n joints[:,:,:,0,:3] = getRandomRotatePoseTransform(config)(joints[:,:,:,0,:3])\n joints = joints.transpose(1, 2).reshape(B, F, N*J, K)\n in_joints_pelvis = in_joints_pelvis.reshape(B, 1, N, K)\n in_joints_pelvis_last = in_joints_pelvis_last.reshape(B, 1, N, K)\n masks = masks.transpose(1, 2).reshape(B, F, N*J)\n\n in_F, out_F = config[\"TRAIN\"][\"input_track_size\"], config[\"TRAIN\"][\"output_track_size\"] \n in_joints = joints[:,:in_F].float()\n out_joints = joints[:,in_F:in_F+out_F].float()\n in_masks = masks[:,:in_F].float()\n out_masks = masks[:,in_F:in_F+out_F].float()\n\n \n return in_joints, in_masks, out_joints, out_masks, padding_mask.float()"
},
{
"identifier": "create_dataset",
"path": "dataset_jrdb.py",
"snippet": "def create_dataset(dataset_name, logger, **args):\n logger.info(\"Loading dataset \" + dataset_name)\n\n if dataset_name == 'jta_all_visual_cues':\n dataset = JtaAllVisualCuesDataset(**args)\n elif dataset_name == 'jrdb_2dbox':\n dataset = Jrdb2dboxDataset(**args)\n else:\n raise ValueError(f\"Dataset with name '{dataset_name}' not found.\")\n \n return dataset"
},
{
"identifier": "collate_batch",
"path": "dataset_jrdb.py",
"snippet": "def collate_batch(batch):\n joints_list = []\n masks_list = []\n num_people_list = []\n for joints, masks in batch:\n \n joints_list.append(joints)\n masks_list.append(masks)\n num_people_list.append(torch.zeros(joints.shape[0]))\n \n joints = pad_sequence(joints_list, batch_first=True)\n masks = pad_sequence(masks_list, batch_first=True)\n padding_mask = pad_sequence(num_people_list, batch_first=True, padding_value=1).bool()\n\n return joints, masks, padding_mask"
},
{
"identifier": "create_model",
"path": "model_jrdb.py",
"snippet": "def create_model(config, logger):\n seq_len = config[\"MODEL\"][\"seq_len\"]\n token_num = config[\"MODEL\"][\"token_num\"]\n nhid=config[\"MODEL\"][\"dim_hidden\"]\n nhead=config[\"MODEL\"][\"num_heads\"]\n nlayers_local=config[\"MODEL\"][\"num_layers_local\"]\n nlayers_global=config[\"MODEL\"][\"num_layers_global\"]\n dim_feedforward=config[\"MODEL\"][\"dim_feedforward\"]\n\n if config[\"MODEL\"][\"type\"] == \"transmotion\":\n logger.info(\"Creating bert model.\")\n model = TransMotion(tok_dim=seq_len,\n nhid=nhid,\n nhead=nhead,\n dim_feedfwd=dim_feedforward,\n nlayers_local=nlayers_local,\n nlayers_global=nlayers_global,\n output_scale=config[\"MODEL\"][\"output_scale\"],\n obs_and_pred=config[\"TRAIN\"][\"input_track_size\"] + config[\"TRAIN\"][\"output_track_size\"],\n num_tokens=token_num,\n device=config[\"DEVICE\"]\n ).to(config[\"DEVICE\"]).float()\n else:\n raise ValueError(f\"Model type '{config['MODEL']['type']}' not found\")\n\n return model"
},
{
"identifier": "create_logger",
"path": "utils/utils.py",
"snippet": "def create_logger(logdir):\n head = '%(asctime)-15s %(message)s'\n if logdir != '':\n log_file = os.path.join(logdir, 'log.txt')\n logging.basicConfig(filename=log_file, format=head)\n # output to console as well\n logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\n else:\n logging.basicConfig(format=head)\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n return logger"
}
] | import argparse
import torch
import random
import numpy as np
from progress.bar import Bar
from torch.utils.data import DataLoader
from dataset_jrdb import batch_process_coords, create_dataset, collate_batch
from model_jrdb import create_model
from utils.utils import create_logger | 1456 |
def inference(model, config, input_joints, padding_mask, out_len=14):
model.eval()
with torch.no_grad():
pred_joints = model(input_joints, padding_mask)
output_joints = pred_joints[:,-out_len:]
return output_joints
def evaluate_ade_fde(model, modality_selection, dataloader, bs, config, logger, return_all=False, bar_prefix="", per_joint=False, show_avg=False):
in_F, out_F = config['TRAIN']['input_track_size'], config['TRAIN']['output_track_size']
bar = Bar(f"EVAL ADE_FDE", fill="#", max=len(dataloader))
batch_size = bs
batch_id = 0
ade = 0
fde = 0
ade_batch = 0
fde_batch = 0
for i, batch in enumerate(dataloader):
joints, masks, padding_mask = batch
        padding_mask = padding_mask.to(config["DEVICE"])
 | in_joints, in_masks, out_joints, out_masks, padding_mask = batch_process_coords(joints, masks, padding_mask, config, modality_selection) | 0 | 2023-12-25 15:12:40+00:00 | 2k |
facebookresearch/ca_body | ca_body/nn/shadow.py | [
{
"identifier": "tile2d",
"path": "ca_body/nn/blocks.py",
"snippet": "def tile2d(x, size: int):\n \"\"\"Tile a given set of features into a convolutional map.\n\n Args:\n x: float tensor of shape [N, F]\n size: int or a tuple\n\n Returns:\n a feature map [N, F, size[0], size[1]]\n \"\"\"\n # size = size if isinstance(size, tuple) else (size, size)\n # NOTE: expecting only int here (!!!)\n return x[:, :, np.newaxis, np.newaxis].expand(-1, -1, size, size)"
},
{
"identifier": "weights_initializer",
"path": "ca_body/nn/blocks.py",
"snippet": "def weights_initializer(lrelu_slope=0.2):\n # pyre-ignore\n def init_fn(m):\n if isinstance(\n m,\n (\n nn.Conv2d,\n nn.Conv1d,\n nn.ConvTranspose2d,\n nn.Linear,\n ),\n ):\n gain = nn.init.calculate_gain(\"leaky_relu\", lrelu_slope)\n nn.init.kaiming_uniform_(m.weight.data, a=gain)\n if hasattr(m, \"bias\") and m.bias is not None:\n nn.init.zeros_(m.bias.data)\n else:\n logger.debug(f\"skipping initialization for {m}\")\n\n return init_fn"
}
] | import logging
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import ca_body.nn.layers as la
from typing import Optional, Dict
from ca_body.nn.blocks import tile2d, weights_initializer | 1068 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# TODO: use shared utils here?
logger = logging.getLogger(__name__)
class ShadowUNet(nn.Module):
def __init__(
self,
uv_size,
ao_mean,
shadow_size,
lrelu_slope=0.2,
beta=1.0,
n_dims=64,
interp_mode="bilinear",
biases=True,
trainable_mean=False,
):
super().__init__()
# this is the size of the output
self.uv_size = uv_size
self.shadow_size = shadow_size
ao_mean = F.interpolate(
th.as_tensor(ao_mean)[np.newaxis],
size=(self.shadow_size, self.shadow_size),
)[0]
if not trainable_mean:
# TODO:
self.register_buffer("ao_mean", ao_mean)
else:
self.register_parameter("ao_mean", th.nn.Parameter(ao_mean))
self.depth = 3
self.lrelu_slope = lrelu_slope
self.interp_mode = interp_mode
self.align_corners = None
if interp_mode == "bilinear":
self.align_corners = False
# the base number of dimensions for the shadow maps
n_dims = n_dims
# TODO: generate this?
self.n_enc_dims = [
(1, n_dims),
(n_dims, n_dims),
(n_dims, n_dims),
(n_dims, n_dims),
]
self.sizes = [shadow_size // (2**i) for i in range(len(self.n_enc_dims))]
logger.debug(f"sizes: {self.sizes}")
self.enc_layers = nn.ModuleList()
for i, size in enumerate(self.sizes):
n_in, n_out = self.n_enc_dims[i]
logger.debug(f"EncoderLayers({i}): {n_in}, {n_out}, {size}")
self.enc_layers.append(
nn.Sequential(
la.Conv2dWNUB(
n_in,
n_out,
kernel_size=3,
height=size,
width=size,
stride=1,
padding=1,
),
nn.LeakyReLU(self.lrelu_slope, inplace=True),
)
)
self.n_dec_dims = [
(n_dims, n_dims),
(n_dims * 2, n_dims),
(n_dims * 2, n_dims),
(n_dims * 2, n_dims),
]
self.dec_layers = nn.ModuleList()
for i in range(len(self.sizes)):
size = self.sizes[-i - 1]
n_in, n_out = self.n_dec_dims[i]
logger.debug(f"DecoderLayer({i}): {n_in}, {n_out}, {size}")
self.dec_layers.append(
nn.Sequential(
la.Conv2dWNUB(
n_in,
n_out,
kernel_size=3,
height=size,
width=size,
stride=1,
padding=1,
),
nn.LeakyReLU(self.lrelu_slope, inplace=True),
)
)
 | self.apply(weights_initializer(self.lrelu_slope)) | 1 | 2023-12-27 15:31:35+00:00 | 2k |
0x00wolf/hkrsAI | src/logger.py | [
{
"identifier": "PathFinder",
"path": "src/pathfinder.py",
"snippet": "class PathFinder:\n \"\"\"Class that returns an object with necessary paths for runtime operations\"\"\"\n def __init__(self, cwd: str):\n self.cwd = cwd\n self.config = f'{self.cwd}/config.json'\n self.logs = f'{self.cwd}/logs'\n self.prompts = f'{self.cwd}/prompts'\n self._first_runtime()\n self._prompts_dir_exists()\n\n @staticmethod\n def _get_cwd():\n \"\"\"Fetch the current working directory\"\"\"\n abs_path = os.path.abspath(__file__)\n cwd = os.path.dirname(abs_path)\n return cwd\n\n def _first_runtime(self):\n \"\"\"Initialize the config.json and logs directory if not present at runtime.\"\"\"\n self._init_cfg_json()\n self._init_logs_dir()\n\n def _prompts_dir_exists(self):\n \"\"\"Check to see if the prompts directory is present, or print an error and exit.\"\"\"\n if not os.path.exists(self.prompts):\n print('[*] error: prompts directory is missing')\n sys.exit()\n\n def _init_cfg_json(self):\n \"\"\"Generate the config.json file.\"\"\"\n if not os.path.exists(self.config):\n self._dump(CONFIG_INIT, self.config)\n\n def _init_logs_dir(self):\n \"\"\"Generate the logs directory\"\"\"\n if not os.path.exists(self.logs):\n os.makedirs(self.logs)\n\n @staticmethod\n def _dump(json_dict, json_file):\n \"\"\"Dumps a JSON object to a file\"\"\"\n with open(json_file, 'w') as f:\n json.dump(json_dict, f, indent=6)"
},
{
"identifier": "Conversation",
"path": "src/conversation.py",
"snippet": "class Conversation:\n messages: list[dict] = dataclasses.field(default_factory=list)\n query: str = ''\n reply: str = ''\n response: dict = dataclasses.field(default_factory=dict)\n tokens: int = 0\n\n def start(self, system_prompt: str):\n self.messages = [{\"role\": \"system\", \"content\": system_prompt}]\n print()\n return Conversation(messages=self.messages)\n\n def speak(self, content: str):\n self.messages.append({\"role\": \"user\", \"content\": content})\n return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)\n\n def think(self, thought):\n if self.query == '':\n self.query = thought\n else:\n self.query = f'{self.query}\\n{thought}'\n return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)\n\n def listen(self, gpt: GPT):\n \"\"\"Function to perform GPT chat completions via the API\"\"\"\n self.response = gpt.client.chat.completions.create(\n model=gpt.model,\n messages=self.messages,\n temperature=gpt.temperature,\n top_p=gpt.top_p,\n n=gpt.n,\n max_tokens=gpt.max_tokens,\n frequency_penalty=gpt.frequency_penalty,\n presence_penalty=gpt.presence_penalty,\n )\n self.reply = self.response.choices[0].message.content\n self.tokens = self.response.usage.total_tokens\n print(f\"\\n{self.reply}\\n\")\n self.messages.append({\"role\": \"assistant\", \"content\": self.reply})\n\n return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)\n\n def breath(self):\n return Conversation(messages=self.messages, query='', reply=self.reply, response=self.response)\n\n @staticmethod\n def greet():\n return Conversation(messages=[], query='', reply='', response=None)"
}
] | import os
import re
import json
from typing import Type
from src.pathfinder import PathFinder
from src.conversation import Conversation | 1302 |
class Logger:
def __init__(self, paths: PathFinder, log_level: int, log_format: str):
"""Logs conversations and saves data at the user's request"""
self.level: int = log_level
self.format: str = log_format
self.paths: Paths = paths
self.number: int = 0
self.file: str = ''
self.savefile: str = ''
self.save_number: int = 0
self.new_log()
@property
def level(self):
return self._level
@level.setter
def level(self, new_value: int):
if 1 != new_value != 2:
raise TypeError
else:
self._level = new_value
@property
def format(self):
return self._format
@format.setter
def format(self, new_value: str):
if new_value == 'txt' or new_value == 'json':
self._format = new_value
else:
self._format = new_value
def new_log(self):
self.number = self._next_number()
self.file = self._new_file()
def _next_number(self):
"""Fetch the next log number from config.json and updates it"""
config_data = self._load(self.paths.config)
self.number = log_num = config_data['log_number']
config_data['log_number'] = self.number + 1
self._dump(config_data, self.paths.config)
return self.number
def _new_file(self):
"""Generates a new logfile relative the current log number"""
while True: # to prevent inadvertently overwriting logs if the value is changed in config.json
self.file = f'{self.paths.logs}/log{self.number}.{self.format}'
try:
with open(self.file, 'x'):
print(f'[*] logfile generated ~ {self.file}')
return self.file
except FileExistsError:
                self.number += 1
 | def log(self, conversation: Conversation): | 1 | 2023-12-22 07:04:47+00:00 | 2k |
ccurme/chesster | chesster/app/board_manager.py | [
{
"identifier": "display_board",
"path": "chesster/app/utils.py",
"snippet": "def display_board(board, player_side: chess.Color) -> None:\n \"\"\"Display board.\"\"\"\n board_size = 360\n if player_side == chess.WHITE:\n flipped = False\n else:\n flipped = True\n if board.move_stack:\n last_move = board.move_stack[-1]\n else:\n last_move = None\n return chess.svg.board(board, flipped=flipped, size=board_size, lastmove=last_move)"
},
{
"identifier": "get_engine_score",
"path": "chesster/app/utils.py",
"snippet": "def get_engine_score(board: chess.Board, player_side: chess.Color) -> int:\n \"\"\"Get board score in centipawns.\"\"\"\n engine = get_stockfish_engine()\n analysis = engine.analyse(board, chess.engine.Limit(time=0.1))\n engine.quit()\n score = analysis[\"score\"]\n if player_side == chess.WHITE:\n return score.white().score()\n else:\n return score.black().score()"
},
{
"identifier": "serialize_board_state_with_last_move",
"path": "chesster/app/utils.py",
"snippet": "def serialize_board_state_with_last_move(\n board: chess.Board, player_side: chess.Color\n) -> str:\n \"\"\"Make message capturing board state.\"\"\"\n board_state_str = f\"\"\"\n Player is playing as {serialize_player_side(player_side)}.\n\n Current board state:\n {serialize_board_state(board, player_side)}\n \"\"\"\n if board.move_stack:\n last_move = board.pop()\n last_move_san = board.san(last_move)\n board.push(last_move)\n if board.turn == player_side:\n last_to_move = \"Opponent\"\n else:\n last_to_move = \"Player\"\n previous_move_str = f\"\"\"{last_to_move} last move:\n {last_move_san}\n \"\"\"\n else:\n previous_move_str = \"No moves yet.\"\n return _clean_up_prompt(\n f\"\"\"\n {board_state_str}\n\n {previous_move_str}\n \"\"\"\n ).strip()"
}
] | import os
import urllib
import chess
from typing import Iterator
from fastapi import WebSocket, WebSocketDisconnect
from langserve import RemoteRunnable
from chesster.app.utils import (
display_board,
get_engine_score,
serialize_board_state_with_last_move,
) | 974 |
LANGSERVE_HOST = os.getenv("LANGSERVE_HOST", "localhost")
LANGSERVE_SECRET = os.getenv("LANGSERVE_SECRET", "secret")
CHAT_HISTORY_LENGTH = 50 # Number of most recent (human, ai) exchanges to retain.
class BoardManager:
def __init__(self):
self.active_websockets: list[WebSocket] = []
self.last_updated_image = None
self.board = chess.Board()
self.player_side = chess.WHITE
self.interesting_move_iterator = None
self.chat_history = []
self.remote_runnable = RemoteRunnable(
f"http://{LANGSERVE_HOST}:8001/chesster", headers={"x-token": LANGSERVE_SECRET}
)
async def set_board(self, board: chess.Board) -> None:
"""Set board."""
self.board = board
await self.update_board(self.board)
async def set_player_side(self, player_side: chess.Color) -> None:
"""Set player side."""
self.player_side = player_side
await self.update_board(self.board)
async def set_interesting_move_iterator(self) -> None:
"""Calculate interesting moves in board's move stack."""
self.interesting_move_iterator = self._interesting_move_iterator()
async def make_move(self, move: chess.Move) -> None:
"""Parse move and update board."""
self.board.push(move)
await self.update_board(self.board)
async def _interesting_move_iterator(
self, centipawn_threshold: int = 100
) -> Iterator[chess.Board]:
"""Make iterator over interesting moves according to Chess engine."""
new_board = chess.Board()
centipawns = 0
for move in self.board.move_stack:
new_board.push(move)
new_centipawns = get_engine_score(new_board, self.player_side)
if new_centipawns is None:
continue
delta = new_centipawns - centipawns
if new_board.turn != self.player_side: # player just moved
if abs(delta) > centipawn_threshold:
await self.update_board(new_board)
                    yield { | "board": serialize_board_state_with_last_move( | 2 | 2023-12-24 19:19:31+00:00 | 2k |
zkarpinski/codeinsight-sdk-python | tests/test_client.py | [
{
"identifier": "CodeInsightClient",
"path": "codeinsight_sdk/client.py",
"snippet": "class CodeInsightClient:\n def __init__(self,\n base_url: str,\n api_token: str,\n timeout: int = 60,\n verify_ssl: bool = True\n ):\n self.base_url = base_url\n self.api_url = f\"{base_url}/codeinsight/api\"\n self.__api_token = api_token\n self.__api_headers = {\n 'Content-Type': 'application/json',\n \"Authorization\": \"Bearer %s\" % self.__api_token,\n \"User-Agent\": \"codeinsight_sdk_python\",\n }\n self.__timeout = timeout\n self.__verify_ssl = verify_ssl\n\n def request(self, method, url_part: str, params: dict = None, body: any = None ):\n url = f\"{self.api_url}/{url_part}\"\n\n # Iterate over params and remove any that are None (Empty)\n if(params):\n for k, v in list(params.items()):\n if v is None:\n del params[k]\n\n response = requests.request(method, url,\n headers=self.__api_headers, params=params, json=body,\n timeout=self.__timeout, verify=self.__verify_ssl)\n\n if not response.ok:\n logger.error(f\"Error: {response.status_code} - {response.reason}\", exc_info=True)\n logger.error(response.text)\n raise CodeInsightError(response) \n\n return response\n\n @property\n def projects(self) -> ProjectHandler:\n return ProjectHandler(self)\n \n @property\n def reports(self) -> ReportHandler:\n return ReportHandler(self)\n \n \n # Coming soon...?\n def inventories(self):\n raise NotImplementedError(\"Inventories are not yet implemented\")\n \n def vulnerabilites(self):\n raise NotImplementedError\n \n def users(self):\n raise NotImplementedError\n \n def licenses(self):\n raise NotImplementedError\n \n def tasks(self):\n raise NotImplementedError\n \n def rules(self):\n raise NotImplementedError\n \n def files(self):\n raise NotImplementedError\n \n def folders(self):\n raise NotImplementedError\n \n def jobs(self):\n raise NotImplementedError\n \n def components(self):\n raise NotImplementedError"
},
{
"identifier": "CodeInsightError",
"path": "codeinsight_sdk/exceptions.py",
"snippet": "class CodeInsightError(GenericError):\n \"\"\"Error class for code insight API errors.\"\"\"\n def __init__(self, response: requests.Response):\n try:\n resp = response.json()\n self.code = response.status_code\n self.message = resp['Error: ']\n self.arguments = resp['Arguments: ']\n self.error = resp['Key: ']\n self.add_note(f\"Arguments: {self.arguments}\")\n super().__init__(\"Error: %s - %s\" % (self.code, self.message))\n\n except KeyError:\n raise ValueError(f\"Error parsing response: {resp}\")\n except json.decoder.JSONDecodeError:\n raise ValueError(f\"Error decoding response: {resp}\")"
}
] | import pytest
import logging
import requests_mock
from codeinsight_sdk import CodeInsightClient
from codeinsight_sdk.exceptions import CodeInsightError | 1265 |
logger = logging.getLogger(__name__)
## CHANGE ME ##
TEST_URL = "https://api.revenera.com"
TEST_API_TOKEN = "your_api_token"
class TestCodeInsightClient:
@pytest.fixture
def client(self):
return CodeInsightClient(TEST_URL, TEST_API_TOKEN)
def test_client(self, client):
assert client.base_url == TEST_URL
def test_endpoint_not_found(self, client):
with requests_mock.Mocker() as m:
m.get(f"{TEST_URL}/codeinsight/api/projects", status_code=404)
with pytest.raises(Exception):
client.projects.all()
class TestProjectEndpoints:
@pytest.fixture
def client(self):
return CodeInsightClient(TEST_URL, TEST_API_TOKEN)
def test_create_project(self, client):
project_name = "Test"
with requests_mock.Mocker() as m:
m.post(f"{TEST_URL}/codeinsight/api/projects", text='{"data": {"id":1}}')
project_id = client.projects.create(project_name)
assert project_id == 1
def test_get_all_projects(self, client):
with requests_mock.Mocker() as m:
m.get(f"{TEST_URL}/codeinsight/api/projects", text='{"data": [{"id":1, "name":"Test"}, {"id":2, "name":"Test 2"}]}')
projects = client.projects.all()
assert len(projects) > 0
def test_get_project_id(self, client):
project_name = "Test"
with requests_mock.Mocker() as m:
m.get(f"{TEST_URL}/codeinsight/api/project/id", text='{ "Content: ": 1 }') # Yes, the key is called 'Content: ' ...
project_id = client.projects.get_id(project_name)
assert project_id == 1
def test_get_project_id_invalid(self,client):
project_name = "Invalid_Project"
fake_response_json = """{ "Arguments: " : ["",""],
"Key: ": " InvalidProjectNameParm",
"Error: ": "The project name entered was not found" }
"""
with requests_mock.Mocker() as m:
# Note, the key names end with a colon and space '...: '
            m.get(f"{TEST_URL}/codeinsight/api/project/id", text=fake_response_json, status_code=400) | with pytest.raises(CodeInsightError): | 1 | 2023-12-29 00:49:12+00:00 | 2k |
chebupelka8/Engine | scripts/loop.py | [
{
"identifier": "Vec2",
"path": "scripts/math.py",
"snippet": "class Vec2:\r\n def __init__(self, x: int | float, y: int | float) -> None:\r\n self.__verify(x, y)\r\n\r\n self.__x = x\r\n self.__y = y\r\n \r\n @staticmethod\r\n def __verify(x, y) -> None:\r\n match x, y:\r\n case x, y if all(map(lambda a: isinstance(a, (int, float)), [x, y])):\r\n ...\r\n case _:\r\n raise ValueError(\"Arguments 'x' and 'y' should be 'int' or 'float'\")\r\n \r\n @property\r\n def x(self) -> int | float:\r\n return self.__x\r\n \r\n @x.setter\r\n def x(self, __value: int | float) -> None:\r\n self.__x = __value\r\n \r\n @property\r\n def y(self) -> int | float:\r\n return self.__y\r\n \r\n @y.setter\r\n def y(self, __value: int | float) -> None:\r\n self.__y = __value\r\n \r\n @property\r\n def xy(self) -> list:\r\n return [self.__x, self.__y]\r\n \r\n def __repr__(self) -> str:\r\n return f\"Vec2(x={self.__x}, y={self.__y})\"\r\n \r\n def __getitem__(self, __index) -> int | float:\r\n return [self.__x, self.__y][__index]\r\n \r\n def __setitem__(self, __index, __value) -> None:\r\n res = [self.__x, self.__y]\r\n res[__index] = __value\r\n self.__verify(*res)\r\n\r\n self.__x, self.__y = res\r\n \r\n def __abs__(self):\r\n return Vec2(abs(self.__x), abs(self.__y))\r\n \r\n def __add__(self, __other):\r\n if not isinstance(__other, Vec2): raise TypeError(\"Argument should be 'Vec2'\")\r\n\r\n return Vec2(self.__x + __other.x, self.__y + __other.y)\r\n\r\n def __mul__(self, __other):\r\n if not isinstance(__other, Vec2): raise TypeError(\"Argument should be 'Vec2'\")\r\n\r\n return Vec2(self.__x * __other.x, self.__y * __other.y)\r"
},
{
"identifier": "Image",
"path": "scripts/image.py",
"snippet": "class Image:\r\n def __init__(self, __arg: str | pygame.Surface, should_convert: bool = True) -> None:\r\n self.__image = self.__load(__arg)\r\n if should_convert: self.__image = self.__image.convert_alpha()\r\n \r\n @classmethod\r\n def __load(cls, __arg: str | pygame.Surface) -> pygame.Surface:\r\n cls.__verify(__arg)\r\n \r\n return pygame.image.load(__arg) if isinstance(__arg, str) else __arg\r\n \r\n @staticmethod\r\n def __verify(__arg: Any) -> None:\r\n if not type(__arg) in (str, pygame.Surface): raise TypeError(f\"Argument should be a string or a 'Surface', not {type(__arg)}\")\r\n \r\n @property\r\n def image(self) -> pygame.Surface:\r\n return self.__image\r\n \r\n @property\r\n def size(self) -> Vec2:\r\n return Vec2(*self.__image.get_size())\r\n \r\n @image.setter\r\n def image(self, image: pygame.Surface) -> None:\r\n self.__image = image\r\n \r\n def __repr__(self) -> str:\r\n return f\"Image(size={self.image.get_size()}, alpha={self.image.get_alpha()})\"\r"
}
] | import pygame, sys
from pygame.locals import *
from .math import Vec2
from .image import Image
| 951 |
class WindowLoop:
def __init__(self, __size: Vec2, fps: int = 144) -> None:
pygame.init()
self.__display = pygame.display.set_mode((__size.x, __size.y))
        pygame.display.set_caption("Engine: v0.1")
 | pygame.display.set_icon(Image("Engine/assets/icon.png").image)
 | 1 | 2023-12-25 07:53:49+00:00 | 2k |
lxbme/TSPLifesaver | TSPLifesaver/tools.py | [
{
"identifier": "AbstractPoint",
"path": "TSPLifesaver/abc/abc.py",
"snippet": "class AbstractPoint(ABC, MutableSequence):\n def __delitem__(self, key): ...\n\n def insert(self, index, value): ...\n\n @abstractmethod\n def __init__(self,pos):\n \"\"\"\n Init the Point\n :param pos:\n \"\"\"\n\n @property\n def name(self):\n \"\"\"\n The name of the Point.\n :return: Any\n \"\"\"\n return None\n\n @abstractmethod\n def distance_to(self, other: MutableSequence):\n \"\"\"\n Calculate the distance between this Point and another.\n :param other:\n :return The distance between the:\n \"\"\""
},
{
"identifier": "AbstractRoute",
"path": "TSPLifesaver/abc/abc.py",
"snippet": "class AbstractRoute(ABC, MutableSequence):\n @abstractmethod\n def swap(self, index_1: int, index_2: int) -> None:\n \"\"\"\n This method should swap the positions of the two elements by indexes.\n \"\"\"\n\n @abstractmethod\n def distance(self):\n \"\"\"\n This method should return the total length of the route.\n :return Number: The total length of the route:\n \"\"\""
},
{
"identifier": "BasicRoute",
"path": "TSPLifesaver/structure.py",
"snippet": "class BasicRoute(AbstractRoute):\n def __init__(self, points: MutableSequence[AbstractPoint], name=\"BasicRoute\"):\n self.points = points\n self.name = name\n\n def __iter__(self):\n return iter(self.points)\n\n def __getitem__(self, item):\n return self.points[item]\n\n def __setitem__(self, item, value):\n self.points[item] = value\n\n def __delitem__(self, item):\n del self.points[item]\n\n def __len__(self):\n return len(self.points)\n\n def __str__(self):\n string = self.name + \"(\\n\"\n for point in self.points:\n string += f\"{point.name}: {[point[i] for i in range(len(point))]}\\n\"\n string += \")\"\n return string\n\n def insert(self, index, value):\n self.points.insert(index, value)\n\n def distance(self):\n \"\"\"\n Calculates the total distance.\n :return:\n \"\"\"\n return sum([pre.distance_to(after) for pre, after in zip(self[:-1], self[1:])])\n\n def swap(self, index_1: int, index_2: int) -> None:\n \"\"\"\n Swaps two points\n :param index_1:\n :param index_2:\n :return:\n \"\"\"\n self[index_1], self[index_2] = self[index_2], self[index_1]\n\n def append(self, value: AbstractPoint):\n self.points.append(value)"
},
{
"identifier": "PointWithEuclideanDistance",
"path": "TSPLifesaver/structure.py",
"snippet": "class PointWithEuclideanDistance(BasicPoint):\n def __init__(self, pos: MutableSequence, name: Any = None):\n super().__init__(pos, name)"
},
{
"identifier": "SimulatedAnnealing",
"path": "TSPLifesaver/optimizer.py",
"snippet": "class SimulatedAnnealing(AbstractOptimizer):\n def __init__(self, initial_route: AbstractRoute, temperature, cooling_rate, min_temperature):\n \"\"\"\n :param initial_route:\n :param initial_route:\n :param temperature:\n :param cooling_rate:\n :param min_temperature:\n \"\"\"\n self.current_route = deepcopy(initial_route)\n self.best_route = deepcopy(initial_route)\n self.temperature = temperature\n self.cooling_rate = cooling_rate\n self.min_temperature = min_temperature\n\n def optimize(self):\n while self.temperature > self.min_temperature:\n new_route = deepcopy(self.current_route)\n\n # exchange randomly\n i, j = random.sample(range(len(new_route)), 2)\n new_route.swap(i, j)\n\n # calc cost\n current_cost = self.current_route.distance()\n new_cost = new_route.distance()\n cost_difference = current_cost - new_cost\n\n # accepting the new result?\n if cost_difference > 0 or math.exp(cost_difference / self.temperature) > random.random():\n self.current_route = new_route\n if new_cost < self.best_route.distance():\n self.best_route = new_route\n\n # decrease the temperature\n self.temperature *= (1 - self.cooling_rate)\n\n return self.best_route"
}
] | from typing import Iterable, MutableSequence, Type
from random import shuffle
from copy import deepcopy
from TSPLifesaver.abc import AbstractRoute, AbstractPoint
from TSPLifesaver.structure import BasicRoute, PointWithEuclideanDistance
from TSPLifesaver.optimizer import SimulatedAnnealing | 1502 |
def route_from_sequence(sequence: Iterable[MutableSequence], route: AbstractRoute = BasicRoute([]),
point_class: Type[AbstractPoint] = PointWithEuclideanDistance,
name_offset: int = 1, ) -> AbstractRoute:
"""
:param route: Instances of the AbstractRoute class or its subclasses, defaults to empty instance of BasicRoute
:param name_offset: Index of the name
:param sequence: Sequence containing coordinates
:param point_class: AbstractPoint or its subclasses ,defaults to PointWithEuclideanDistance
:return: a new route
"""
index = name_offset
for pos in sequence:
try:
point = point_class(pos, name=f"{index}")
except:
point = point_class(pos)
route.append(point)
index += 1
return route
def simulated_annealing(route: AbstractRoute, epoch: int = 100, temperature: float = 10000,
cooling_rate: float = 0.03, min_temperature: float = 1,
log: bool = False) -> AbstractRoute:
"""
:param route: Instances of the AbstractRoute class or its subclasses
:param epoch: Number of epochs to simulate, defaults to 100
:param temperature: Temperature of the annealing, defaults to 10000
:param cooling_rate: Cooling rate of the annealing, defaults to 0.03
:param min_temperature: Minimum temperature of the annealing, defaults to 1
:param log: Whether to print the log of the annealing, defaults to False
:return: optimized route
"""
if len(route):
best_route = deepcopy(route)
for i in range(epoch):
if log:
print(f"Running epoch {i} of {epoch}")
        shuffle(route) | opt = SimulatedAnnealing(route, temperature=temperature, | 4 | 2023-12-26 10:08:09+00:00 | 2k |