prompt (stringlengths 1.74k–34.3k) | ref (stringlengths 4–432) |
---|---|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: dtiesling/flask-muck
# Path: tests/app.py
class GuardianModel(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String, nullable=False, unique=True)
age = db.Column(db.Integer, nullable=True)
family_id = db.Column(db.Integer, db.ForeignKey(FamilyModel.id))
family = db.relationship(FamilyModel)
children: Mapped[list["ChildModel"]] = db.relationship()
# Path: tests/app.py
class ToyApiView(BaseApiView):
api_name = "toy"
Model = ToyModel
ResponseSchema = ToySchema
CreateSchema = ToySchema
PatchSchema = ToySchema
UpdateSchema = ToySchema
parent = ChildApiView
one_to_one_api = True
# Path: tests/app.py
class ChildModel(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String, nullable=False)
age = db.Column(db.Integer, nullable=True)
family_id = db.Column(db.Integer, db.ForeignKey(FamilyModel.id))
guardian_id = db.Column(db.Integer, db.ForeignKey(GuardianModel.id))
guardian = db.relationship(GuardianModel, back_populates="children")
toy: Mapped["ToyModel"] = db.relationship(uselist=False)
# Path: tests/app.py
class ToyModel(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String, nullable=False)
family_id = db.Column(db.Integer, db.ForeignKey(FamilyModel.id))
child_id = db.Column(db.Integer, db.ForeignKey(ChildModel.id))
child = db.relationship(ChildModel, back_populates="toy")
# Path: tests/app.py
class BaseApiView(FlaskMuckApiView):
"""Base view to inherit from. Helpful for setting class variables shared with all API views such as "sqlalchemy_db"
and "decorators".
"""
session = db.session
decorators = [login_required]
pre_create_callbacks = [PreCallback]
pre_update_callbacks = [PreCallback]
pre_patch_callbacks = [PreCallback]
pre_delete_callbacks = [PreCallback]
post_create_callbacks = [PostCallback]
post_update_callbacks = [PostCallback]
post_patch_callbacks = [PostCallback]
post_delete_callbacks = [PostCallback]
# Path: tests/app.py
class PreCallback(FlaskMuckCallback):
def execute(self) -> None:
return
# Path: tests/app.py
class PostCallback(FlaskMuckCallback):
def execute(self) -> None:
return
# Path: tests/app.py
class GuardianApiView(BaseApiView):
api_name = "guardians"
Model = GuardianModel
ResponseSchema = GuardianSchema
CreateSchema = GuardianSchema
PatchSchema = GuardianSchema
UpdateSchema = GuardianSchema
DetailSchema = GuardianDetailSchema
searchable_columns = [GuardianModel.name, GuardianModel.age]
# Path: tests/test.py
import json
import pytest
from unittest.mock import patch
from pydantic import BaseModel, ConfigDict
from flask_muck.exceptions import MuckImplementationError
from flask_muck.utils import (
get_url_rule,
get_fk_column,
get_query_filters_from_request_path,
get_join_models_from_parent_views,
)
from tests.app import (
GuardianModel,
ToyApiView,
ChildModel,
ToyModel,
BaseApiView,
PreCallback,
PostCallback,
GuardianApiView,
)
def test_bad_json(self, get):
get("/guardians/?filters=notjson", expected_status_code=400)
def test_column_does_not_exist(self, filter_guardians):
filter_guardians({"nope": "fail"}, expected_status_code=400)
filter_guardians({"nope.nested": "fail"}, expected_status_code=400)
filter_guardians({"children.nope": "fail"}, expected_status_code=400)
@pytest.mark.usefixtures("simpsons", "belchers")
class TestSort:
def test_sort(self, get, marge, bart, maggie, lisa):
assert get(f"/guardians/{marge.id}/children/?sort=name") == [
{"name": bart.name},
{"name": lisa.name},
{"name": maggie.name},
]
assert get(f"/guardians/{marge.id}/children/?sort=age") == [
{"name": maggie.name},
{"name": lisa.name},
{"name": bart.name},
]
def test_sort_asc(self, get, marge, maggie, lisa, bart):
assert get(f"/guardians/{marge.id}/children/?sort=age__asc") == [
{"name": maggie.name},
{"name": lisa.name},
{"name": bart.name},
]
assert get(
f"/guardians/{marge.id}/children/?sort=name__asc",
) == [{"name": bart.name}, {"name": lisa.name}, {"name": maggie.name}]
def test_sort_desc(self, get, marge, lisa, maggie, bart):
assert get(
f"/guardians/{marge.id}/children/?sort=age__desc",
) == [{"name": bart.name}, {"name": lisa.name}, {"name": maggie.name}]
assert get(
f"/guardians/{marge.id}/children/?sort=name__desc",
) == [{"name": maggie.name}, {"name": lisa.name}, {"name": bart.name}]
def test_nested_sort(self, get):
assert get(f"/guardians/?sort=family.surname") == [
{"name": "Bob"},
{"name": "Marge"},
]
def test_bad_sort(self, get):
get(f"/guardians/?sort=name__fail", expected_status_code=400)
get(f"/guardians/?sort=fail", expected_status_code=400)
get(f"/guardians/?sort=family.fail", expected_status_code=400)
get(f"/guardians/?sort=double.fail", expected_status_code=400)
def test_change_operator_separator(
self, get, monkeypatch, marge, lisa, bart, maggie
):
monkeypatch.setattr(BaseApiView, "operator_separator", "|")
assert get(
f"/guardians/{marge.id}/children/?sort=age|desc",
) == [{"name": bart.name}, {"name": lisa.name}, {"name": maggie.name}]
assert get(
f"/guardians/{marge.id}/children/?sort=name|desc",
) == [{"name": maggie.name}, {"name": lisa.name}, {"name": bart.name}]
@pytest.mark.usefixtures("simpsons", "belchers")
class TestSearch:
def test_search(self, get, marge):
assert get(f"/guardians/?search=marge") == [{"name": "Marge"}]
assert get(f"/guardians/?search=nobody") == []
assert get(f"/guardians/{marge.id}/children/?search=bart") == [{"name": "Bart"}]
assert get(f"/guardians/{marge.id}/children/?search=nope") == []
def test_unsupported_search(self, get, marge, bart, monkeypatch):
monkeypatch.setattr(GuardianApiView, "searchable_columns", [])
get(f"/guardians/?search=marge", expected_status_code=400)
class TestCallbacks:
@pytest.fixture
def pre_callback_patch(self):
with patch.object(PreCallback, "execute") as patched:
yield patched
@pytest.fixture
def post_callback_patch(self):
with patch.object(PostCallback, "execute") as patched:
yield patched
def test_create_callbacks(
self, post, user, pre_callback_patch, post_callback_patch
):
post("/guardians/", json={"name": "Jill"})
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
def test_update_callbacks(
self, put, guardian, pre_callback_patch, post_callback_patch
):
put(f"/guardians/{guardian.id}/", json={"name": "updated"})
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
def test_patch_callbacks(
self, put, patch, guardian, pre_callback_patch, post_callback_patch
):
patch(f"/guardians/{guardian.id}/", json={"name": "patched"})
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
def test_delete_callbacks(
self, client, guardian, pre_callback_patch, post_callback_patch
):
client.delete(f"/guardians/{guardian.id}/")
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
@pytest.mark.usefixtures("simpsons", "belchers")
| class TestNestedApis: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: BrianPugh/cyclopts
# Path: cyclopts/_convert.py
def token_count(type_: Union[Type, inspect.Parameter]) -> Tuple[int, bool]:
"""The number of tokens after a keyword the parameter should consume.
Parameters
----------
type_: Type
A type hint/annotation to infer token_count from if not explicitly specified.
Returns
-------
int
Number of tokens to consume.
bool
If this is ``True`` and positional, consume all remaining tokens.
The returned number of tokens constitutes a single element of the iterable-to-be-parsed.
"""
from cyclopts.parameter import get_hint_parameter
annotation = get_hint_parameter(type_)[0]
annotation = resolve(annotation)
origin_type = get_origin_and_validate(annotation)
if (origin_type or annotation) is tuple:
args = get_args(annotation)
if args:
return sum(token_count(x)[0] for x in args if x is not ...), ... in args
else:
return 1, True
elif (origin_type or annotation) is bool:
return 0, False
elif annotation in _iterable_types or (origin_type in _iterable_types and len(get_args(annotation)) == 0):
return 1, True
elif (origin_type in _iterable_types or origin_type is collections.abc.Iterable) and len(get_args(annotation)):
return token_count(get_args(annotation)[0])[0], True
else:
return 1, False
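A minimal usage sketch of the function above, assuming a cyclopts checkout where cyclopts._convert.token_count is importable (as in the bind.py imports shown later); the expected pairs follow from the branches above, and the List[int] case additionally assumes that list is among the module's _iterable_types.

from typing import List, Tuple
from cyclopts._convert import token_count

assert token_count(int) == (1, False)              # plain scalar: one token, no greedy consumption
assert token_count(bool) == (0, False)             # flag: consumes no value token
assert token_count(List[int]) == (1, True)         # iterable: one token per element, consume the rest
assert token_count(Tuple[int, str]) == (2, False)  # fixed-size tuple: sum of element counts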
# Path: cyclopts/exceptions.py
class CoercionError(CycloptsError):
"""There was an error performing automatic type coercion."""
input_value: str = ""
"""
String input token that couldn't be coerced.
"""
target_type: Optional[Type] = None
"""
Intended type to coerce into.
"""
parameter: Optional[inspect.Parameter] = None
def __str__(self):
if self.parameter:
assert self.parameter2cli is not None
parameter_cli_name = ",".join(self.parameter2cli[self.parameter])
if self.msg is not None:
if self.parameter:
return f"{parameter_cli_name}: " + self.msg # pyright: ignore[reportUnboundVariable]
else:
return self.msg
response = f'Error converting value "{self.input_value}"'
if self.target_type is not None:
target_type = str(self.target_type).lstrip("typing.") # lessens the verbosity a little bit.
response += f" to {target_type}"
if self.parameter:
response += f' for "{parameter_cli_name}"' # pyright: ignore[reportUnboundVariable]
return super().__str__() + response + "."
# Path: cyclopts/exceptions.py
class CycloptsError(Exception):
"""Root exception for runtime errors.
As CycloptsErrors bubble up the Cyclopts stack, more information is added to it.
Finally, :func:`cyclopts.exceptions.format_cyclopts_error` formats the message nicely for the user.
"""
msg: Optional[str] = None
"""
If set, override automatic message generation.
"""
verbose: bool = True
"""
More verbose error messages; aimed towards developers debugging their Cyclopts app.
Defaults to ``False``.
"""
root_input_tokens: Optional[List[str]] = None
"""
The parsed CLI tokens that were initially fed into the :class:`App`.
"""
unused_tokens: Optional[List[str]] = None
"""
Leftover tokens after parsing is complete.
"""
target: Optional[Callable] = None
"""
The python function associated with the command being parsed.
"""
cli2parameter: Optional[Dict[str, Tuple[inspect.Parameter, Any]]] = None
"""
Dictionary mapping CLI strings to python parameters.
"""
parameter2cli: Optional[ParameterDict] = None
"""
Dictionary mapping function parameters to possible CLI tokens.
"""
command_chain: Optional[List[str]] = None
"""
List of commands that lead to ``target``.
"""
app: Optional["App"] = None
"""
The Cyclopts application itself.
"""
def __str__(self):
if self.msg is not None:
return self.msg
strings = []
if self.verbose:
strings.append(type(self).__name__)
if self.target:
file, lineno = _get_function_info(self.target)
strings.append(f'Function defined in file "{file}", line {lineno}:')
strings.append(f" {self.target.__name__}{inspect.signature(self.target)}")
if self.root_input_tokens is not None:
strings.append(f"Root Input Tokens: {self.root_input_tokens}")
else:
pass
if strings:
return "\n".join(strings) + "\n"
else:
return ""
def _find_and_replace(self, s: str) -> str:
"""Replaces all instances of "--python-variable-name" with "--cli-variable-name"."""
if self.parameter2cli is None:
return s
for p, names in self.parameter2cli.items():
target = f"--{p.name}"
replacement = names[0]
s = s.replace(target, replacement)
return s
# Path: cyclopts/exceptions.py
class MissingArgumentError(CycloptsError):
"""A parameter had insufficient tokens to be populated."""
parameter: inspect.Parameter
"""
The parameter that failed to parse.
"""
tokens_so_far: List[str]
"""
The tokens that were parsed so far for this Parameter.
"""
def __str__(self):
from cyclopts._convert import token_count
count, _ = token_count(self.parameter)
if count == 0:
required_string = "flag required"
only_got_string = ""
elif count == 1:
required_string = "requires an argument"
only_got_string = ""
else:
required_string = f"requires {count} arguments"
only_got_string = f" Only got {len(self.tokens_so_far)}."
assert self.parameter2cli is not None
parameter_cli_name = ",".join(self.parameter2cli[self.parameter])
strings = []
if self.command_chain:
strings.append(
f'Command "{" ".join(self.command_chain)}" parameter "{parameter_cli_name}" {required_string}.{only_got_string}'
)
else:
strings.append(f'Parameter "{parameter_cli_name}" {required_string}.{only_got_string}')
if self.verbose:
strings.append(f" Parsed: {self.tokens_so_far}.")
return super().__str__() + " ".join(strings)
# Path: cyclopts/exceptions.py
class RepeatArgumentError(CycloptsError):
"""The same parameter has erroneously been specified multiple times."""
parameter: inspect.Parameter
"""
The repeated parameter.
"""
def __str__(self):
assert self.parameter2cli is not None
parameter_cli_name = ",".join(self.parameter2cli[self.parameter])
return super().__str__() + f"Parameter {parameter_cli_name} specified multiple times."
# Path: cyclopts/exceptions.py
class ValidationError(CycloptsError):
"""Validator function raised an exception."""
value: str
"""Parenting Assertion/Value/Type Error message."""
parameter: Optional[inspect.Parameter] = None
"""Parameter who's ``validator`` function failed."""
def __str__(self):
if self.parameter is None:
self.value = self._find_and_replace(self.value)
return super().__str__() + self.value
else:
assert self.parameter2cli is not None
parameter_cli_name = ",".join(self.parameter2cli[self.parameter])
return super().__str__() + f"Invalid value for {parameter_cli_name}. {self.value}"
# Path: cyclopts/parameter.py
def get_hint_parameter(
type_: Union[Type, inspect.Parameter], *default_parameters: Optional[Parameter]
) -> Tuple[Type, Parameter]:
"""Get the type hint and Cyclopts :class:`Parameter` from a type-hint.
If a ``cyclopts.Parameter`` is not found, a default Parameter is returned.
"""
cyclopts_parameters = []
if isinstance(type_, inspect.Parameter):
annotation = type_.annotation
if annotation is inspect.Parameter.empty or resolve(annotation) is Any:
if type_.default in (inspect.Parameter.empty, None):
annotation = str
else:
return get_hint_parameter(type(type_.default), *default_parameters)
else:
annotation = type_
if annotation is inspect.Parameter.empty:
annotation = str
annotation = resolve_optional(annotation)
if type(annotation) is AnnotatedType:
annotations = annotation.__metadata__ # pyright: ignore[reportGeneralTypeIssues]
annotation = get_args(annotation)[0]
cyclopts_parameters = [x for x in annotations if isinstance(x, Parameter)]
annotation = resolve(annotation)
cparam = Parameter.combine(*default_parameters, *cyclopts_parameters)
return annotation, cparam
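A short sketch of the behavior described above, assuming cyclopts is installed; the top-level import of Parameter is an assumption here, while get_hint_parameter is imported exactly as in bind.py below. An Annotated hint is unwrapped to its base type, Parameter metadata is folded into the returned Parameter, and an unannotated inspect.Parameter falls back to str.

import inspect
from typing import Annotated
from cyclopts import Parameter
from cyclopts.parameter import get_hint_parameter

hint, cparam = get_hint_parameter(Annotated[int, Parameter(name=["--count"])])
assert hint is int                      # the Annotated wrapper is stripped
assert isinstance(cparam, Parameter)    # metadata combined into a single Parameter

# An inspect.Parameter with no annotation and no default resolves to str (per the branch above).
iparam = inspect.Parameter("x", inspect.Parameter.KEYWORD_ONLY)
assert get_hint_parameter(iparam)[0] is str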
# Path: cyclopts/parameter.py
def validate_command(f: Callable):
"""Validate if a function abides by Cyclopts's rules.
Raises
------
ValueError
Function has naming or parameter/signature inconsistencies.
"""
signature = inspect.signature(f)
for iparam in signature.parameters.values():
get_origin_and_validate(iparam.annotation)
type_, cparam = get_hint_parameter(iparam)
if not cparam.parse and iparam.kind is not iparam.KEYWORD_ONLY:
raise ValueError("Parameter.parse=False must be used with a KEYWORD_ONLY function parameter.")
# Path: cyclopts/resolve.py
class ResolvedCommand:
command: Callable
groups: List[Group]
groups_iparams: List[Tuple[Group, List[inspect.Parameter]]]
iparam_to_groups: ParameterDict
iparam_to_cparam: ParameterDict
name_to_iparam: Dict[str, inspect.Parameter]
def __init__(
self,
f,
app_parameter: Optional[Parameter] = None,
group_arguments: Optional[Group] = None,
group_parameters: Optional[Group] = None,
parse_docstring: bool = True,
):
"""
``app_parameter`` implicitly has the command-group parameter already resolved.
Parameters
----------
f: Callable
Function to resolve annotated :class:`Parameters`.
app_parameter:
Default :class:`Parameter` to inherit configuration from.
group_arguments: Optional[Group]
Default :class:`Group` for positional-only arguments.
group_parameters: Optional[Group]
Default :class:`Group` for non-positional-only arguments.
parse_docstring: bool
Parse the docstring to populate Parameter ``help``, if not explicitly set.
Disable for improved performance if ``help`` won't be used in the resulting :class:`Parameter`.
"""
if group_arguments is None:
group_arguments = Group.create_default_arguments()
if group_parameters is None:
group_parameters = Group.create_default_parameters()
self.command = f
signature = inspect.signature(f)
self.name_to_iparam = cast(Dict[str, inspect.Parameter], signature.parameters)
# Get:
# 1. Fully resolved and created Groups.
# 2. A mapping of inspect.Parameter to those Group objects.
self.groups, self.iparam_to_groups = _resolve_groups(f, app_parameter, group_arguments, group_parameters)
# Fully Resolve each Cyclopts Parameter
self.iparam_to_cparam = ParameterDict()
iparam_to_docstring_cparam = _resolve_docstring(f) if parse_docstring else ParameterDict()
for iparam, groups in self.iparam_to_groups.items():
if iparam.kind in (iparam.POSITIONAL_ONLY, iparam.VAR_POSITIONAL):
# Name is only used for help-string
names = [iparam.name.upper()]
else:
names = ["--" + iparam.name.replace("_", "-")]
default_name_parameter = Parameter(name=names)
cparam = get_hint_parameter(
iparam,
app_parameter,
*(x.default_parameter for x in groups),
iparam_to_docstring_cparam.get(iparam),
default_name_parameter,
Parameter(required=iparam.default is iparam.empty),
)[1]
self.iparam_to_cparam[iparam] = cparam
self.bind = signature.bind_partial if _has_unparsed_parameters(f, app_parameter) else signature.bind
# Create a convenient group-to-iparam structure
self.groups_iparams = [
(
group,
[iparam for iparam, groups in self.iparam_to_groups.items() if group in groups],
)
for group in self.groups
]
# Path: cyclopts/utils.py
class ParameterDict(MutableMapping):
"""A dictionary implementation that can handle mutable ``inspect.Parameter`` as keys."""
def __init__(self, store: Optional[Dict[inspect.Parameter, Any]] = None):
self.store = {}
self.reverse_mapping = {}
if store is not None:
for k, v in store.items():
self[k] = v
def _param_key(self, param: inspect.Parameter) -> tuple:
if not isinstance(param, inspect.Parameter):
raise TypeError(f"Key must be an inspect.Parameter; got {type(param)}.")
return (param.name, param.kind, param.annotation)
def __getitem__(self, key: inspect.Parameter) -> Any:
return self.store[self._param_key(key)]
def __setitem__(self, key: inspect.Parameter, value: Any) -> None:
processed_key = self._param_key(key)
self.store[processed_key] = value
self.reverse_mapping[processed_key] = key
def __delitem__(self, key: inspect.Parameter) -> None:
processed_key = self._param_key(key)
del self.store[processed_key]
del self.reverse_mapping[processed_key]
def __iter__(self) -> Iterator[inspect.Parameter]:
return iter(self.reverse_mapping.values())
def __len__(self) -> int:
return len(self.store)
def __repr__(self) -> str:
inner = []
for key, value in self.store.items():
inner.append(f"Parameter(name={key[0]!r}, kind={key[1]}, annotation={key[2]}): {value}")
return "{" + ", ".join(inner) + "}"
def __contains__(self, key: object) -> bool:
if not isinstance(key, inspect.Parameter):
raise TypeError(f"Key must be an inspect.Parameter; got {type(key)}.")
return self._param_key(key) in self.store
def setdefault(self, key: inspect.Parameter, default: Any = None) -> Any:
processed_key = self._param_key(key)
if processed_key not in self.store:
self.reverse_mapping[processed_key] = key
return self.store.setdefault(processed_key, default)
def get(self, key: inspect.Parameter, default: Any = None):
try:
return self[key]
except KeyError:
return default
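A minimal sketch, assuming cyclopts.utils.ParameterDict is importable as shown: because keys are reduced to the (name, kind, annotation) tuple, two distinct inspect.Parameter objects that describe the same parameter address the same entry.

import inspect
from cyclopts.utils import ParameterDict

p1 = inspect.Parameter("count", inspect.Parameter.KEYWORD_ONLY, annotation=int)
p2 = inspect.Parameter("count", inspect.Parameter.KEYWORD_ONLY, annotation=int)

d = ParameterDict()
d[p1] = "first"
assert d[p2] == "first"              # same key tuple, same slot
assert p2 in d and len(d) == 1
assert list(d)[0].name == "count"    # iteration yields the original Parameter objects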
# Path: cyclopts/bind.py
import inspect
import itertools
import os
import shlex
import sys
from typing import Any, Dict, Iterable, List, Tuple, Union, get_origin
from cyclopts._convert import token_count
from cyclopts.exceptions import (
CoercionError,
CycloptsError,
MissingArgumentError,
RepeatArgumentError,
ValidationError,
)
from cyclopts.parameter import get_hint_parameter, validate_command
from cyclopts.resolve import ResolvedCommand
from cyclopts.utils import ParameterDict
def normalize_tokens(tokens: Union[None, str, Iterable[str]]) -> List[str]:
if tokens is None:
tokens = sys.argv[1:] # Remove the executable
elif isinstance(tokens, str):
tokens = shlex.split(tokens)
else:
| tokens = list(tokens) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: RoboFlamingo/RoboFlamingo
# Path: open_flamingo/open_flamingo/src/flamingo.py
class Flamingo(nn.Module):
def __init__(
self,
vision_encoder: nn.Module,
lang_encoder: nn.Module,
eoc_token_id: int,
media_token_id: int,
vis_dim: int,
cross_attn_every_n_layers: int = 1,
gradient_checkpointing: bool = False,
):
"""
Args:
vision_encoder (nn.Module): HF CLIPModel
lang_encoder (nn.Module): HF causal language model
eoc_token_id (int): Token id for <|endofchunk|>
media_token_id (int): Token id for <image>
vis_dim (int): Dimension of the visual features.
Visual features are projected to match this shape along the last dimension.
cross_attn_every_n_layers (int, optional): How often to apply cross attention after transformer layer. Defaults to 1.
"""
super().__init__()
self.eoc_token_id = eoc_token_id
self.media_token_id = media_token_id
self.vis_dim = vis_dim
if hasattr(lang_encoder.config, "d_model"):
self.lang_dim = lang_encoder.config.d_model # mpt uses d_model
else:
self.lang_dim = lang_encoder.config.hidden_size
self.vision_encoder = vision_encoder.visual
self.perceiver = PerceiverResampler(dim=self.vis_dim)
self.lang_encoder = lang_encoder
self.lang_encoder.init_flamingo(
media_token_id=media_token_id,
lang_hidden_size=self.lang_dim,
vis_hidden_size=self.vis_dim,
cross_attn_every_n_layers=cross_attn_every_n_layers,
gradient_checkpointing=gradient_checkpointing,
)
self._use_gradient_checkpointing = gradient_checkpointing
self.perceiver._use_gradient_checkpointing = gradient_checkpointing
def forward(
self,
vision_x: torch.Tensor,
lang_x: torch.Tensor,
attention_mask: torch.Tensor = None,
labels: torch.Tensor = None,
clear_conditioned_layers: bool = True,
past_key_values=None,
use_cache: bool = False,
):
"""
Forward pass of Flamingo.
Args:
vision_x (torch.Tensor): Vision input
shape (B, T_img, F, C, H, W) with F=1
lang_x (torch.Tensor): Language input ids
shape (B, T_txt)
attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.
labels (torch.Tensor, optional): Labels. Defaults to None.
clear_conditioned_layers: if True, clear the conditioned layers
once the forward pass is completed. Set this to false if the
same set of images will be reused in another subsequent
forward pass.
past_key_values: pre-computed values to pass to language model.
See past_key_values documentation in Hugging Face
CausalLM models.
use_cache: whether to use cached key values. See use_cache
documentation in Hugging Face CausalLM models.
"""
assert (
self.lang_encoder.initialized_flamingo
), "Flamingo layers are not initialized. Please call `init_flamingo` first."
assert (
self.lang_encoder._use_cached_vision_x or vision_x is not None
), "Must provide either vision_x or have precached media using cache_media()."
if self.lang_encoder._use_cached_vision_x:
# Case: use cached; vision_x should be cached and other
# vision-related inputs should not be provided.
assert (
vision_x is None
), "Expect vision_x to be None when media has been cached using cache_media(). Try uncache_media() first."
assert self.lang_encoder.is_conditioned()
else:
# Case: do not use caching (i.e. this is a standard forward pass);
self._encode_vision_x(vision_x=vision_x)
self._condition_media_locations(input_ids=lang_x)
output = self.lang_encoder(
input_ids=lang_x,
attention_mask=attention_mask,
labels=labels,
past_key_values=past_key_values,
use_cache=use_cache,
)
if clear_conditioned_layers:
self.lang_encoder.clear_conditioned_layers()
return output
def generate(
self,
vision_x: torch.Tensor,
lang_x: torch.Tensor,
attention_mask: torch.Tensor = None,
**kwargs,
):
"""
Generate text conditioned on vision and language inputs.
Args:
vision_x (torch.Tensor): Vision input
shape (B, T_img, F, C, H, W)
images in the same chunk are collated along T_img, and frames are collated along F
currently only F=1 is supported (single-frame videos)
lang_x (torch.Tensor): Language input
shape (B, T_txt)
**kwargs: see generate documentation in Hugging Face CausalLM models. Some notable kwargs:
max_length (int, optional): Maximum length of the output. Defaults to None.
attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.
num_beams (int, optional): Number of beams. Defaults to 1.
max_new_tokens (int, optional): Maximum new tokens. Defaults to None.
temperature (float, optional): Temperature. Defaults to 1.0.
top_k (int, optional): Top k. Defaults to 50.
top_p (float, optional): Top p. Defaults to 1.0.
no_repeat_ngram_size (int, optional): No repeat ngram size. Defaults to 0.
length_penalty (float, optional): Length penalty. Defaults to 1.0.
num_return_sequences (int, optional): Number of return sequences. Defaults to 1.
do_sample (bool, optional): Do sample. Defaults to False.
early_stopping (bool, optional): Early stopping. Defaults to False.
Returns:
torch.Tensor: lang_x with generated tokens appended to it
"""
num_beams = kwargs.pop("num_beams", 1)
if num_beams > 1:
vision_x = vision_x.repeat_interleave(num_beams, dim=0)
self.lang_encoder._use_cached_vision_x = True
self._encode_vision_x(vision_x=vision_x)
eos_token_id = kwargs.pop("eos_token_id", self.eoc_token_id)
output = self.lang_encoder.generate(
input_ids=lang_x,
attention_mask=attention_mask,
eos_token_id=eos_token_id,
num_beams=num_beams,
**kwargs,
)
self.lang_encoder.clear_conditioned_layers()
self.lang_encoder._use_cached_vision_x = False
return output
def _encode_vision_x(self, vision_x: torch.Tensor):
"""
Compute media tokens from vision input by passing it through vision encoder and conditioning language model.
Args:
vision_x (torch.Tensor): Vision input
shape (B, T_img, F, C, H, W)
Images in the same chunk are collated along T_img, and frames are collated along F
Currently only F=1 is supported (single-frame videos)
rearrange code based on https://github.com/dhansmair/flamingo-mini
"""
assert vision_x.ndim == 6, "vision_x should be of shape (b, T_img, F, C, H, W)"
b, T, F = vision_x.shape[:3]
assert F == 1, "Only single frame supported"
vision_x = rearrange(vision_x, "b T F c h w -> (b T F) c h w")
with torch.no_grad():
vision_x = self.vision_encoder(vision_x)[1]
vision_x = rearrange(vision_x, "(b T F) v d -> b T F v d", b=b, T=T, F=F)
vision_x = self.perceiver(vision_x)
for layer in self.lang_encoder._get_decoder_layers():
layer.condition_vis_x(vision_x)
def wrap_fsdp(self, wrapper_kwargs, device_id):
"""
Manually wraps submodules for FSDP and move other parameters to device_id.
Why manually wrap?
- all parameters within the FSDP wrapper must have the same requires_grad.
We have a mix of frozen and unfrozen parameters.
- model.vision_encoder.visual needs to be individually wrapped or encode_vision_x errors
See: https://github.com/pytorch/pytorch/issues/82461#issuecomment-1269136344
The rough wrapping structure is:
- FlamingoModel
- FSDP(FSDP(vision_encoder))
- FSDP(FSDP(perceiver))
- lang_encoder
- FSDP(FSDP(input_embeddings))
- FlamingoLayers
- FSDP(FSDP(gated_cross_attn_layer))
- FSDP(FSDP(decoder_layer))
- FSDP(FSDP(output_embeddings))
- other parameters
Known issues:
- Our FSDP strategy is not compatible with tied embeddings. If the LM embeddings are tied,
train with DDP or set the --freeze_lm_embeddings flag to true.
- With FSDP + gradient ckpting, one can increase the batch size with seemingly no upper bound.
Although the training curves look okay, we found that downstream performance dramatically
degrades if the batch size is unreasonably large (e.g., 100 MMC4 batch size for OPT-125M).
FAQs about our FSDP wrapping strategy:
Why double wrap?
As of torch==2.0.1, FSDP's _post_forward_hook and _post_backward_hook
only free gathered parameters if the module is NOT FSDP root.
Why unfreeze the decoder_layers?
See https://github.com/pytorch/pytorch/issues/95805
As of torch==2.0.1, FSDP's _post_backward_hook is only registered if the flat param
requires_grad=True. We need the postback to fire to avoid OOM.
To effectively freeze the decoder layers, we exclude them from the optimizer.
What is assumed to be frozen v. unfrozen?
We assume that the model is being trained under normal Flamingo settings
with these lines being called in factory.py:
```
# Freeze all parameters
model.requires_grad_(False)
assert sum(p.numel() for p in model.parameters() if p.requires_grad) == 0
# Unfreeze perceiver, gated_cross_attn_layers, and LM input embeddings
model.perceiver.requires_grad_(True)
model.lang_encoder.gated_cross_attn_layers.requires_grad_(True)
[optional] model.lang_encoder.get_input_embeddings().requires_grad_(True)
```
"""
# unfreeze the decoder layers
for block in self.lang_encoder.old_decoder_blocks:
block.requires_grad_(True)
# wrap in FSDP
with enable_wrap(wrapper_cls=FSDP, **wrapper_kwargs):
self.perceiver = wrap(wrap(self.perceiver))
self.lang_encoder.old_decoder_blocks = nn.ModuleList(
wrap(wrap(block)) for block in self.lang_encoder.old_decoder_blocks
)
self.lang_encoder.gated_cross_attn_layers = nn.ModuleList(
wrap(wrap(layer)) if layer is not None else None
for layer in self.lang_encoder.gated_cross_attn_layers
)
self.lang_encoder.init_flamingo_layers(self._use_gradient_checkpointing)
self.lang_encoder.set_input_embeddings(
wrap(wrap(self.lang_encoder.get_input_embeddings()))
)
self.lang_encoder.set_output_embeddings(
wrap(wrap(self.lang_encoder.get_output_embeddings()))
)
self.vision_encoder = wrap(wrap(self.vision_encoder)) # frozen
# manually move non-FSDP managed parameters to device_id
# these are all in lang_encoder
apply_with_stopping_condition(
module=self.lang_encoder,
apply_fn=lambda m: m.to(device_id),
apply_condition=lambda m: len(list(m.children())) == 0,
stopping_condition=lambda m: isinstance(m, FSDP),
)
# exclude the original decoder layers from the optimizer
for block in self.lang_encoder.old_decoder_blocks:
for p in block.parameters():
p.exclude_from_optimizer = True
# set up clip_grad_norm_ function
def clip_grad_norm_(max_norm):
self.perceiver.clip_grad_norm_(max_norm)
for layer in self.lang_encoder.gated_cross_attn_layers:
if layer is not None:
layer.clip_grad_norm_(max_norm)
self.lang_encoder.get_input_embeddings().clip_grad_norm_(max_norm)
self.clip_grad_norm_ = clip_grad_norm_
def _condition_media_locations(self, input_ids: torch.Tensor):
"""
Compute the media token locations from lang_x and condition the language model on these.
Args:
input_ids (torch.Tensor): Language input
shape (B, T_txt)
"""
media_locations = input_ids == self.media_token_id
for layer in self.lang_encoder._get_decoder_layers():
layer.condition_media_locations(media_locations)
def cache_media(self, input_ids: torch.Tensor, vision_x: torch.Tensor):
"""
Pre-cache a prompt/sequence of images / text for log-likelihood evaluations.
All subsequent calls to forward() will generate attending to the LAST
image in vision_x.
This is not meant to be used to cache things for generate().
Args:
input_ids (torch.Tensor): Language input
shape (B, T_txt)
vision_x (torch.Tensor): Vision input
shape (B, T_img, F, C, H, W)
Images in the same chunk are collated along T_img, and frames are collated along F
Currently only F=1 is supported (single-frame videos)
"""
self._encode_vision_x(vision_x=vision_x)
self._condition_media_locations(input_ids=input_ids)
self.lang_encoder._use_cached_vision_x = True
def uncache_media(self):
"""
Clear all conditioning.
"""
self.lang_encoder.clear_conditioned_layers()
self.lang_encoder._use_cached_vision_x = False
# Path: open_flamingo/open_flamingo/src/flamingo_lm.py
class FlamingoLMMixin(nn.Module):
"""
Mixin to add cross-attention layers to a language model.
"""
def set_decoder_layers_attr_name(self, decoder_layers_attr_name):
self.decoder_layers_attr_name = decoder_layers_attr_name
def _get_decoder_layers(self):
return getattr_recursive(self, self.decoder_layers_attr_name)
def _set_decoder_layers(self, value):
setattr_recursive(self, self.decoder_layers_attr_name, value)
def init_flamingo(
self,
media_token_id,
lang_hidden_size,
vis_hidden_size,
cross_attn_every_n_layers,
gradient_checkpointing,
residual=False,
):
"""
Initialize Flamingo by adding a new gated cross attn to the decoder. Store the media token id for computing the media locations.
"""
print('-'*100)
print(self.decoder_layers_attr_name)
self.old_decoder_blocks = self._get_decoder_layers()
self.gated_cross_attn_layers = nn.ModuleList(
[
GatedCrossAttentionBlock(
dim=lang_hidden_size, dim_visual=vis_hidden_size
)
if (layer_idx + 1) % cross_attn_every_n_layers == 0
else None
for layer_idx, _ in enumerate(self._get_decoder_layers())
]
)
self.init_flamingo_layers(gradient_checkpointing, residual=residual)
self.media_token_id = media_token_id
self.initialized_flamingo = True
self._use_cached_vision_x = False
def init_flamingo_layers(self, gradient_checkpointing, residual=False):
"""
Re-initializes the FlamingoLayers.
Propagates any changes made to self.gated_cross_attn_layers or self.old_decoder_blocks
"""
self._set_decoder_layers(
nn.ModuleList(
[
FlamingoLayer(
gated_cross_attn_layer, decoder_layer, gradient_checkpointing, residual=residual
)
for gated_cross_attn_layer, decoder_layer in zip(
self.gated_cross_attn_layers, self.old_decoder_blocks
)
]
)
)
def forward(self, input_ids, attention_mask, **kwargs):
"""Condition the Flamingo layers on the media locations before forward()"""
if not self.initialized_flamingo:
raise ValueError(
"Flamingo layers are not initialized. Please call `init_flamingo` first."
)
media_locations = input_ids == self.media_token_id
# if there are media already cached and we're generating and there are no media tokens in the input,
# we'll assume that ALL input tokens should attend to the last previous media that is cached.
# this is especially important for HF generate() compatibility, since generate() calls forward()
# repeatedly one token at a time (with no media tokens).
# without this check, the model would not attend to any images when generating (after the first token)
use_cached_media_locations = (
self._use_cached_vision_x
and self.is_conditioned()
and not media_locations.any()
)
for layer in self._get_decoder_layers():
if not use_cached_media_locations:
layer.condition_media_locations(media_locations)
layer.condition_use_cached_media(use_cached_media_locations)
# package arguments for the other parent's forward. since we don't know the order of the arguments,
# make them all kwargs
kwargs["input_ids"] = input_ids
kwargs["attention_mask"] = attention_mask
return super().forward(**kwargs) # Call the other parent's forward method
def is_conditioned(self) -> bool:
"""Check whether all decoder layers are already conditioned."""
return all(l.is_conditioned() for l in self._get_decoder_layers())
def clone_parameters(self):
for layer in self._get_decoder_layers():
layer.clone_parameters()
def clear_conditioned_layers(self):
for layer in self._get_decoder_layers():
layer.condition_vis_x(None)
layer.condition_media_locations(None)
layer.condition_use_cached_media(None)
# Path: open_flamingo/open_flamingo/src/utils.py
def extend_instance(obj, mixin):
"""Apply mixins to a class instance after creation"""
base_cls = obj.__class__
base_cls_name = obj.__class__.__name__
obj.__class__ = type(
base_cls_name, (mixin, base_cls), {}
) # mixin needs to go first for our forward() logic to work
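A small illustration of the mixin mechanics, using extend_instance exactly as defined above; the two classes are made up for the example. Because the mixin is placed first among the bases, its methods win in the MRO, which is what the forward() override in FlamingoLMMixin relies on.

class Plain:
    def describe(self):
        return "plain"

class LoudMixin:
    def describe(self):
        return "loud"

obj = Plain()
extend_instance(obj, LoudMixin)   # obj.__class__ becomes type("Plain", (LoudMixin, Plain), {})
assert isinstance(obj, Plain) and isinstance(obj, LoudMixin)
assert obj.describe() == "loud"   # the mixin method overrides the original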
# Path: open_flamingo/open_flamingo/src/factory.py
from typing import Optional
from transformers import AutoModelForCausalLM, AutoTokenizer
from .flamingo import Flamingo
from .flamingo_lm import FlamingoLMMixin
from .utils import extend_instance
import open_clip
def create_model_and_transforms(
clip_vision_encoder_path: str,
clip_vision_encoder_pretrained: str,
lang_encoder_path: str,
tokenizer_path: str,
cross_attn_every_n_layers: int = 1,
use_local_files: bool = False,
| decoder_layers_attr_name: str = None, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: bigai-nlco/langsuite
# Path: langsuite/constants.py
CSS4_COLORS = {
"aliceblue": "#F0F8FF",
"antiquewhite": "#FAEBD7",
"aqua": "#00FFFF",
"aquamarine": "#7FFFD4",
"azure": "#F0FFFF",
"beige": "#F5F5DC",
"bisque": "#FFE4C4",
"black": "#000000",
"blanchedalmond": "#FFEBCD",
"blue": "#0000FF",
"blueviolet": "#8A2BE2",
"brown": "#A52A2A",
"burlywood": "#DEB887",
"cadetblue": "#5F9EA0",
"chartreuse": "#7FFF00",
"chocolate": "#D2691E",
"coral": "#FF7F50",
"cornflowerblue": "#6495ED",
"cornsilk": "#FFF8DC",
"crimson": "#DC143C",
"cyan": "#00FFFF",
"darkblue": "#00008B",
"darkcyan": "#008B8B",
"darkgoldenrod": "#B8860B",
"darkgray": "#A9A9A9",
"darkgreen": "#006400",
"darkgrey": "#A9A9A9",
"darkkhaki": "#BDB76B",
"darkmagenta": "#8B008B",
"darkolivegreen": "#556B2F",
"darkorange": "#FF8C00",
"darkorchid": "#9932CC",
"darkred": "#8B0000",
"darksalmon": "#E9967A",
"darkseagreen": "#8FBC8F",
"darkslateblue": "#483D8B",
"darkslategray": "#2F4F4F",
"darkslategrey": "#2F4F4F",
"darkturquoise": "#00CED1",
"darkviolet": "#9400D3",
"deeppink": "#FF1493",
"deepskyblue": "#00BFFF",
"dimgray": "#696969",
"dimgrey": "#696969",
"dodgerblue": "#1E90FF",
"firebrick": "#B22222",
"floralwhite": "#FFFAF0",
"forestgreen": "#228B22",
"fuchsia": "#FF00FF",
"gainsboro": "#DCDCDC",
"ghostwhite": "#F8F8FF",
"gold": "#FFD700",
"goldenrod": "#DAA520",
"gray": "#808080",
"green": "#008000",
"greenyellow": "#ADFF2F",
"grey": "#808080",
"honeydew": "#F0FFF0",
"hotpink": "#FF69B4",
"indianred": "#CD5C5C",
"indigo": "#4B0082",
"ivory": "#FFFFF0",
"khaki": "#F0E68C",
"lavender": "#E6E6FA",
"lavenderblush": "#FFF0F5",
"lawngreen": "#7CFC00",
"lemonchiffon": "#FFFACD",
"lightblue": "#ADD8E6",
"lightcoral": "#F08080",
"lightcyan": "#E0FFFF",
"lightgoldenrodyellow": "#FAFAD2",
"lightgray": "#D3D3D3",
"lightgreen": "#90EE90",
"lightgrey": "#D3D3D3",
"lightpink": "#FFB6C1",
"lightsalmon": "#FFA07A",
"lightseagreen": "#20B2AA",
"lightskyblue": "#87CEFA",
"lightslategray": "#778899",
"lightslategrey": "#778899",
"lightsteelblue": "#B0C4DE",
"lightyellow": "#FFFFE0",
"lime": "#00FF00",
"limegreen": "#32CD32",
"linen": "#FAF0E6",
"magenta": "#FF00FF",
"maroon": "#800000",
"mediumaquamarine": "#66CDAA",
"mediumblue": "#0000CD",
"mediumorchid": "#BA55D3",
"mediumpurple": "#9370DB",
"mediumseagreen": "#3CB371",
"mediumslateblue": "#7B68EE",
"mediumspringgreen": "#00FA9A",
"mediumturquoise": "#48D1CC",
"mediumvioletred": "#C71585",
"midnightblue": "#191970",
"mintcream": "#F5FFFA",
"mistyrose": "#FFE4E1",
"moccasin": "#FFE4B5",
"navajowhite": "#FFDEAD",
"navy": "#000080",
"oldlace": "#FDF5E6",
"olive": "#808000",
"olivedrab": "#6B8E23",
"orange": "#FFA500",
"orangered": "#FF4500",
"orchid": "#DA70D6",
"palegoldenrod": "#EEE8AA",
"palegreen": "#98FB98",
"paleturquoise": "#AFEEEE",
"palevioletred": "#DB7093",
"papayawhip": "#FFEFD5",
"peachpuff": "#FFDAB9",
"peru": "#CD853F",
"pink": "#FFC0CB",
"plum": "#DDA0DD",
"powderblue": "#B0E0E6",
"purple": "#800080",
"rebeccapurple": "#663399",
"red": "#FF0000",
"rosybrown": "#BC8F8F",
"royalblue": "#4169E1",
"saddlebrown": "#8B4513",
"salmon": "#FA8072",
"sandybrown": "#F4A460",
"seagreen": "#2E8B57",
"seashell": "#FFF5EE",
"sienna": "#A0522D",
"silver": "#C0C0C0",
"skyblue": "#87CEEB",
"slateblue": "#6A5ACD",
"slategray": "#708090",
"slategrey": "#708090",
"snow": "#FFFAFA",
"springgreen": "#00FF7F",
"steelblue": "#4682B4",
"tan": "#D2B48C",
"teal": "#008080",
"thistle": "#D8BFD8",
"tomato": "#FF6347",
"turquoise": "#40E0D0",
"violet": "#EE82EE",
"wheat": "#F5DEB3",
"white": "#FFFFFF",
"whitesmoke": "#F5F5F5",
"yellow": "#FFFF00",
"yellowgreen": "#9ACD32",
}
# Path: langsuite/shapes.py
class Geometry:
def __init__(self) -> None:
self.shapey_geo = None
def __repr__(self) -> str:
return ""
# Path: langsuite/shapes.py
class Point2D(Geometry):
def __init__(self, *args) -> None:
if len(args) > 2:
raise TypeError(f"Point2D takes at most 2 arguements ({len(args)} given)")
elif len(args) == 2:
self.x, self.y = float(args[0]), float(args[1])
elif len(args) == 1:
if isinstance(args[0], Point2D) or isinstance(args[0], Point):
self.x, self.y = args[0].x, args[0].y
elif type(args[0]) in [list, tuple, np.ndarray] and len(args[0]) == 2:
self.x, self.y = args[0][:2]
else:
raise TypeError(
f"Unsupport argument type for Point2D ({type(args[0])} given)"
)
else:
raise TypeError("Point2D takes at least 1 argument")
self.shapely_geo = Point(self.x, self.y)
@property
def modulus(self) -> float:
return math.sqrt(self.x**2 + self.y**2)
def __add__(self, other):
return Point2D(self.x + other.x, self.y + other.y)
def __sub__(self, other):
return Point2D(self.x - other.x, self.y - other.y)
def __mul__(self, other: float):
return Point2D(self.x * other, self.y * other)
def __truediv__(self, other: float):
if other == 0.0:
raise RuntimeError("Div Zero in Point2D")
return Point2D(self.x / other, self.y / other)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Point2D):
return False
return self.x == other.x and self.y == other.y
def __str__(self) -> str:
return f"({self.x}, {self.y})"
def to_wkt(self) -> str:
return self.shapely_geo.wkt
def to_numpy(self) -> np.ndarray:
return np.array([self.x, self.y], dtype=np.float32)
def rotate(self, angle, center, use_radians=False):
"""Rotation of Polygon2D geometry
Refers to https://shapely.readthedocs.io/en/stable/manual.html#shapely.affinity.rotate
Args:
angle: degrees or radians by setting `use_radians=True`
origin: (x0, y0)
"""
if isinstance(center, Point2D):
center = (center.x, center.y)
# TODO
self.shapely_geo = shapely.affinity.rotate(
self.shapely_geo, angle, center, use_radians
)
self.x = self.shapely_geo.x
self.y = self.shapely_geo.y
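A quick sanity check of the Point2D wrapper, assuming the langsuite package and its shapely/numpy dependencies are installed: rotating the point (1, 0) by 90 degrees around the origin lands, up to floating-point error, on (0, 1), and the arithmetic operators work element-wise.

from langsuite.shapes import Point2D

p = Point2D(1.0, 0.0)
p.rotate(90, (0.0, 0.0))   # angle in degrees by default (use_radians=False)
assert round(p.x, 9) == 0.0 and round(p.y, 9) == 1.0
assert (Point2D(1, 2) + Point2D(3, 4)) == Point2D(4, 6)   # element-wise addition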
# Path: langsuite/shapes.py
class Polygon2D(Geometry):
def __init__(
self,
coords: List[Union[Point2D, Tuple[float, float]]],
holes: Optional[List[Union[Point2D, Tuple[float, float]]]] = None,
) -> None:
self.coords = [Point2D(c) for c in coords]
self.holes = [] if holes is None else [Point2D(c) for c in holes]
self.shapely_geo = Polygon(
shell=[c.shapely_geo for c in self.coords],
holes=[c.shapely_geo for c in self.holes],
)
def __repr__(self) -> str:
return "{" + ", ".join([str(c) for c in self.coords]) + "}"
@property
def area(self) -> float:
return self.shapely_geo.area
@property
def is_closed(self) -> bool:
return len(self.coords) > 1 and self.coords[-1] == self.coords[0]
@property
def length(self) -> float:
return self.shapely_geo.length
@property
def centroid(self) -> Point2D:
return Point2D(self.shapely_geo.centroid)
@property
def x_min(self) -> float:
return np.min([c.x for c in self.coords])
@property
def x_max(self) -> float:
return np.max([c.x for c in self.coords])
@property
def y_min(self) -> float:
return np.min([c.y for c in self.coords])
@property
def y_max(self) -> float:
return np.max([c.y for c in self.coords])
@property
def xy(self):
return self.shapely_geo.exterior.xy
def intersects(self, other) -> bool:
return self.shapely_geo.intersects(other.shapely_geo)
def rotate(self, angle, origin="center", use_radians=False):
"""Rotation of Polygon2D geometry
Refers to https://shapely.readthedocs.io/en/stable/manual.html#shapely.affinity.rotate
Args:
angle: degrees or radians by setting `use_radians=True`
origin: ['center', 'centroid', (x0, y0)]
"""
if isinstance(origin, Point2D):
origin = (origin.x, origin.y)
self.shapely_geo = shapely.affinity.rotate(
self.shapely_geo, angle, origin, use_radians
)
self.coords = [Point2D(c) for c in self.shapely_geo.exterior.coords]
def to_wkt(self) -> str:
"""Well-known text representation of geometry
https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry
Examples:
POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))
POLYGON ((35 10, 45 45, 15 40, 10 20, 35 10), (20 30, 35 35, 30 20, 20 30))
"""
return self.shapely_geo.wkt
def to_numpy(self) -> np.array:
return (
np.array([p.to_numpy() for p in self.coords[:-1]])
if self.is_closed
else np.array([p.to_numpy() for p in self.coords])
)
def contains(self, other) -> bool:
"""Returns True if a Point or a Polygon is contained by the current Polygon
Args:
other: Point2D or Polygon2D
Returns:
a boolean value
"""
if not isinstance(other, Polygon2D) and not isinstance(other, Point2D):
raise TypeError(
f"contains only support Polygon2D or Point2D ({type(other)} given)"
)
return self.shapely_geo.contains(other.shapely_geo)
# Path: langsuite/utils/logging.py
class Logger:
def __init__(
self,
log_level: int = logging.DEBUG,
log_file: str = "",
use_cmd: bool = False,
console_logging=True,
) -> None:
def has_cmdline_interface(self):
def setLevel(self, level):
def set_cmd_client(self, cmd_cli: CMDClient, disable_console_logging=True):
def set_log_file(self, log_file):
def close(self):
def info(self, msg):
def debug(self, msg):
def error(self, msg):
def warn(self, msg):
def user_input(self):
def emit(self, message):
def robot_emit(self, message_or_streamer, name="Robot", action="chat"):
# Path: langsuite/world.py
WORLD_REGISTRY = Registry("world")
# Path: langsuite/world.py
class Door(Object2D):
def __init__(
self,
door_id: str,
*,
alias: Optional[str] = None,
geometry: Optional[Polygon2D] = None,
asset_id: Optional[str] = None,
room2room: Tuple[str] = [],
openable: bool = True,
is_open: bool = True,
**kwargs,
):
super().__init__(
ObjectType.DOOR,
door_id,
alias=alias,
geometry=geometry,
asset_id=asset_id,
**kwargs,
)
self.room2room = room2room
self.openable = openable
self.is_open = is_open
self.wall = None
self.chilren_types = []
# Path: langsuite/world.py
class Object2D:
def __init__(
self,
obj_type: ObjectType,
id: str,
*,
alias: Optional[str] = None,
geometry: Optional[Geometry] = None,
asset_id: Optional[str] = None,
**kwargs,
) -> None:
self.id = id
self.asset_id = asset_id
self.alias = alias
self.obj_type = obj_type
self.geometry = geometry
self.props = dict()
for k, val in kwargs.items():
self.props[k] = val
self.walls = defaultdict()
self.doors = defaultdict()
self.windows = defaultdict()
if "children" in self.props:
self.children = self.props["children"]
else:
self.children = defaultdict()
self.chilren_types = [ObjectType.OBJECT]
@classmethod
def create(cls, obj_data):
return NotImplementedError()
def __repr__(self) -> str:
obj_string = f"asset_id: {self.asset_id}"
return obj_string
def contains(self, other) -> bool:
"""Returns True is another object is in current object
Args:
other: Object2D: an object instance
"""
if not isinstance(other, Object2D):
raise ValueError(
f"Invalid input: other has to be of type Object ({type(other)} given)"
)
if other.obj_type not in self.chilren_types:
return False
if other.obj_type == ObjectType.WALL:
return other.id in self.walls.keys()
elif other.obj_type == ObjectType.DOOR:
return other.id in self.doors.keys()
elif other.obj_type == ObjectType.WINDOW:
return other.id in self.windows.keys()
elif other.obj_type == ObjectType.OBJECT:
return other.id in self.children.keys()
else:
raise ValueError(f"Invalid input: {type(other)}.")
def add_wall(self, wall) -> Optional[str]:
if ObjectType.WALL not in self.chilren_types:
raise ValueError(f"Unable to add type {wall.obj_type}")
if wall.id in self.walls:
return wall.id
self.walls[wall.id] = wall
return wall.id
def add_door(self, door) -> Optional[str]:
if ObjectType.DOOR not in self.chilren_types:
raise ValueError(f"Unable to add type {door.obj_type}")
if door.id in self.doors:
return door.id
self.doors[door.id] = door
return door.id
def add_window(self, window) -> Optional[str]:
if ObjectType.WINDOW not in self.chilren_types:
raise ValueError(f"Unable to add type {window.obj_type}")
if window.id in self.windows:
return window.id
self.windows[window.id] = window
return window.id
def add_object(self, object) -> Optional[str]:
if ObjectType.OBJECT not in self.chilren_types:
raise ValueError(f"Unable to add type {object.obj_type}")
if object.id in self.children:
return object.id
self.children[object.id] = object
return object.id
def update_position(self, position):
diff = position - self.position
coords = []
for i in range(len(self.geometry.coords)):
coords.append(self.geometry.coords[i] + diff)
self.geometry = Polygon2D(coords)
self.position = position
# Path: langsuite/world.py
class ObjectType(Enum):
OBJECT = 1
ROOM = 2
WALL = 3
WINDOW = 4
DOOR = 5
# Path: langsuite/world.py
class Room(Object2D):
def __init__(
self,
room_id: str,
*,
alias: Optional[str] = None,
geometry: Optional[Polygon2D] = None,
asset_id: Optional[str] = None,
**kwargs,
):
super().__init__(
ObjectType.ROOM,
room_id,
alias=alias,
geometry=geometry,
asset_id=asset_id,
**kwargs,
)
self.chilren_types = [
ObjectType.OBJECT,
ObjectType.DOOR,
ObjectType.WINDOW,
ObjectType.WALL,
]
# Path: langsuite/world.py
class Wall(Object2D):
def __init__(
self,
wall_id: str,
*,
alias: Optional[str],
geometry: Optional[Geometry],
asset_id: Optional[str],
room2room: Union[Tuple[str], str] = [],
**kwargs,
):
super().__init__(
ObjectType.WALL,
wall_id,
alias=alias,
geometry=geometry,
asset_id=asset_id,
**kwargs,
)
self.chilren_types = [ObjectType.OBJECT, ObjectType.DOOR, ObjectType.WINDOW]
self.room2room = [room2room] if type(room2room) == str else room2room
# Path: langsuite/world.py
class Window(Object2D):
def __init__(
self,
window_id: str,
*,
alias: Optional[str] = None,
geometry: Optional[Polygon2D] = None,
asset_id: Optional[str] = None,
room2room: Tuple[str] = [],
**kwargs,
):
super().__init__(
ObjectType.WINDOW,
window_id,
alias=alias,
geometry=geometry,
asset_id=asset_id,
**kwargs,
)
self.room2room = room2room
self.chilren_types = []
# Path: langsuite/world.py
class World:
def __init__(self, world_id: str):
self.world_id = world_id
self.rooms: Dict[str, Room] = dict()
self.walls: Dict[str, Wall] = dict()
self.doors: Dict[str, Door] = dict()
self.windows: Dict[str, Window] = dict()
self.objects: Dict[str, Object2D] = dict()
self.grid_size = None
self.room_polygons = None
self.id2object = {}
@classmethod
def create(cls, world_cfg):
world_type = world_cfg.get("type")
if world_type is None or len(world_type) == 0:
raise ValueError("World type must be provided to create a world.")
if WORLD_REGISTRY.hasRegistered(world_type):
return WORLD_REGISTRY.get(world_type).create(world_cfg)
else:
raise NotImplementedError(f"World type {world_type} not found.")
def add_room(self, room: Room) -> Optional[str]:
return NotImplementedError()
# Path: langsuite/envs/cwah/cwah_world.py
import copy
import math
import random
import numpy as np
import plotly.graph_objects as go
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
from langsuite.constants import CSS4_COLORS
from langsuite.shapes import Geometry, Point2D, Polygon2D
from langsuite.utils.logging import logger
from langsuite.world import (
WORLD_REGISTRY,
Door,
Object2D,
ObjectType,
Room,
Wall,
Window,
World,
)
# Copyright (c) BIGAI Research. All rights reserved.
# Licensed under the MIT license.
from __future__ import annotations
CwahPath = Path(__file__).parent
def ToEulerAngles(q):
sinp = 2 * (q[3] * q[1] - q[0] * q[2])
sinp = int(sinp)
pitch = math.asin(sinp)
return pitch
def get_bbox(center, size):
minx = center[0] - (1 / 2) * size[0]
maxx = center[0] + (1 / 2) * size[0]
minz = center[2] - (1 / 2) * size[2]
maxz = center[2] + (1 / 2) * size[2]
return [[minx, minz], [minx, maxz], [maxx, maxz], [maxx, minz]]
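A quick arithmetic check of get_bbox as defined above: the corners come from the x and z components of center and size, half the size on each side, ordered min-x/min-z, min-x/max-z, max-x/max-z, max-x/min-z.

corners = get_bbox(center=[1.0, 0.0, 2.0], size=[2.0, 0.0, 4.0])
assert corners == [[0.0, 0.0], [0.0, 4.0], [2.0, 4.0], [2.0, 0.0]]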
class CwahWall(Wall):
def __init__(
self,
wall_id: str,
*,
alias: Optional[str] = None,
geometry: Optional[Geometry] = None,
class_name: Optional[str] = None,
room2room: Union[Tuple[str], str] = list(),
empty: bool,
**kwargs,
):
super().__init__(
wall_id,
alias=alias,
geometry=geometry,
class_name=class_name,
asset_id="not_exist",
room2room=room2room,
**kwargs,
)
self.empty = empty
self.class_name = class_name
@classmethod
def create(cls, wall_data):
polys_2d = Polygon2D(wall_data["polygon"])
empty = wall_data.get("empty", False)
return cls(
wall_data["id"],
geometry=polys_2d,
class_name=wall_data["class_name"],
props=wall_data,
empty=empty,
)
def plot(self, axes=None):
if self.geometry is None:
return
x, y = self.geometry.shapely_geo.exterior.xy
if self.empty:
axes.plot(x, y, color="black", linestyle="-.", linewidth=0.5)
else:
axes.plot(x, y, color="black", linewidth=0.5)
axes.fill(x, y, color="gray")
def render(self, fig=None):
if self.geometry is None:
return
if not fig:
fig = go.Figure()
x, y = self.geometry.shapely_geo.exterior.xy
fig.add_shape(
type="rect",
xref="x",
yref="y",
x0=self.geometry.x_min,
y0=self.geometry.y_min,
x1=self.geometry.x_max,
y1=self.geometry.y_max,
opacity=0.2,
fillcolor="black",
line=dict(width=0),
)
class CwahDoor(Door):
def __init__(
| self, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: radekd91/inferno
# Path: inferno/layers/losses/EmonetLoader.py
def get_emonet(device=None, load_pretrained=True):
device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
path_to_emonet = get_path_to_externals() / "emonet"
if not(str(path_to_emonet) in sys.path or str(path_to_emonet.absolute()) in sys.path):
# print(f"Adding EmoNet path '{path_to_emonet}'")
sys.path += [str(path_to_emonet)]
from emonet.models import EmoNet
# n_expression = 5
n_expression = 8
# Create the model
net = EmoNet(n_expression=n_expression).to(device)
# if load_pretrained:
state_dict_path = Path(
inspect.getfile(EmoNet)).parent.parent.parent / 'pretrained' / f'emonet_{n_expression}.pth'
print(f'Loading the EmoNet model from {state_dict_path}.')
state_dict = torch.load(str(state_dict_path), map_location='cpu')
state_dict = {k.replace('module.', ''): v for k, v in state_dict.items()}
net.load_state_dict(state_dict, strict=False)
if not load_pretrained:
print("Created an untrained EmoNet instance")
net.reset_emo_parameters()
net.eval()
return net
# Path: inferno/models/video_emorec/VideoEmotionClassifier.py
class VideoEmotionClassifier(VideoClassifierBase):
def __init__(self,
cfg
):
self.cfg = cfg
preprocessor = None
feature_model = feature_enc_from_cfg(cfg.model.get('feature_extractor', None))
fusion_layer = None
if not self.is_multi_modal():
feature_size = feature_model.output_feature_dim() if feature_model is not None else cfg.model.input_feature_size
else:
if self.cfg.model.fusion_type == 'tensor':
assert len(self.cfg.model.modality_list) == 2
feature_size = ( cfg.model.input_feature_size + 1) * (feature_model.output_feature_dim() + 1)
elif self.cfg.model.fusion_type == 'tensor_low_rank':
assert len(self.cfg.model.modality_list) == 2
fusion_cfg = self.cfg.model.fusion_cfg
fusion_layer = LowRankTensorFusion(fusion_cfg.rank, [cfg.model.input_feature_size, feature_model.output_feature_dim()], fusion_cfg.output_dim)
feature_size = fusion_layer.output_feature_dim()
else:
feature_size = feature_model.output_feature_dim() + cfg.model.input_feature_size
sequence_classifier = sequence_encoder_from_cfg(cfg.model.get('sequence_encoder', None), feature_size)
classification_head = classification_head_from_cfg(cfg.model.get('classification_head', None),
sequence_classifier.encoder_output_dim(),
cfg.model.output.num_classes,
)
super().__init__(cfg,
preprocessor = preprocessor,
feature_model = feature_model,
fusion_layer = fusion_layer,
sequence_encoder = sequence_classifier,
classification_head = classification_head,
)
@classmethod
def instantiate(cls, cfg, stage, prefix, checkpoint, checkpoint_kwargs) -> 'VideoEmotionClassifier':
"""
Function that instantiates the model from checkpoint or config
"""
if checkpoint is None:
model = VideoEmotionClassifier(cfg)
else:
checkpoint_kwargs = checkpoint_kwargs or {}
model = VideoEmotionClassifier.load_from_checkpoint(
checkpoint_path=checkpoint,
cfg=cfg,
strict=False,
**checkpoint_kwargs
)
# if stage == 'train':
# mode = True
# else:
# mode = False
# model.reconfigure(cfg, prefix, downgrade_ok=True, train=mode)
return model
# Path: inferno/models/IO.py
def get_checkpoint_with_kwargs(cfg, prefix, replace_root = None, relative_to = None, checkpoint_mode=None, pattern=None):
checkpoint = get_checkpoint(cfg, replace_root = replace_root,
relative_to = relative_to, checkpoint_mode=checkpoint_mode, pattern=pattern)
cfg.model.resume_training = False # make sure the training is not magically resumed by the old code
# checkpoint_kwargs = {
# "model_params": cfg.model,
# "learning_params": cfg.learning,
# "inout_params": cfg.inout,
# "stage_name": prefix
# }
checkpoint_kwargs = {'config': cfg}
return checkpoint, checkpoint_kwargs
# Path: inferno/utils/other.py
def class_from_str(str, module=None, none_on_fail = False) -> type:
if module is None:
module = sys.modules[__name__]
if hasattr(module, str):
cl = getattr(module, str)
return cl
elif str.lower() == 'none' or none_on_fail:
return None
raise RuntimeError(f"Class '{str}' not found.")
# Path: inferno/layers/losses/EmoNetLoss.py
def create_emo_loss(device, emoloss = None, trainable=False, dual=False, normalize_features=False, emo_feat_loss=None):
if emoloss is None:
return EmoNetLoss(device, emonet=emoloss)
if isinstance(emoloss, str):
path = Path(emoloss)
if not path.is_absolute():
path = Path(get_path_to_assets()) / path
if path.is_dir():
from inferno.layers.losses.emotion_loss_loader import emo_network_from_path
emo_loss = emo_network_from_path(path)
if isinstance(emo_loss, EmoNetModule):
emonet = emo_loss.emonet
print("Creating EmoNetLoss")
return EmoNetLoss(device, emonet=emonet, trainable=trainable,
normalize_features=normalize_features, emo_feat_loss=emo_feat_loss)
else:
if not dual:
print(f"Creating EmoBackboneLoss, trainable={trainable}")
return EmoBackboneLoss(device, emo_loss, trainable=trainable,
normalize_features=normalize_features, emo_feat_loss=emo_feat_loss)
else:
print(f"Creating EmoBackboneDualLoss")
return EmoBackboneDualLoss(device, emo_loss, trainable=trainable, clone_is_trainable=True,
normalize_features=normalize_features, emo_feat_loss=emo_feat_loss)
else:
raise ValueError("Please specify the directory which contains the config of the trained Emonet.")
else:
raise TypeError(f"Wrong type of emoloss: {type(emoloss)}")
# Path: inferno/layers/losses/emotion_loss_loader.py
def emo_network_from_path(path):
print(f"Loading trained emotion network from: '{path}'")
def load_configs(run_path):
from omegaconf import OmegaConf
with open(Path(run_path) / "cfg.yaml", "r") as f:
conf = OmegaConf.load(f)
if run_path != conf.inout.full_run_dir:
conf.inout.output_dir = str(Path(run_path).parent)
conf.inout.full_run_dir = str(run_path)
conf.inout.checkpoint_dir = str(Path(run_path) / "checkpoints")
return conf
cfg = load_configs(path)
if not bool(cfg.inout.checkpoint_dir):
cfg.inout.checkpoint_dir = str(Path(path) / "checkpoints")
checkpoint_mode = 'best'
stages_prefixes = ""
checkpoint, checkpoint_kwargs = get_checkpoint_with_kwargs(cfg, stages_prefixes,
checkpoint_mode=checkpoint_mode,
# relative_to=relative_to_path,
# replace_root=replace_root_path
)
checkpoint_kwargs = checkpoint_kwargs or {}
if 'emodeca_type' in cfg.model.keys():
module_class = class_from_str(cfg.model.emodeca_type, sys.modules[__name__])
else:
module_class = EmoNetModule
emonet_module = module_class.load_from_checkpoint(checkpoint_path=checkpoint, strict=False,
**checkpoint_kwargs)
return emonet_module
# Path: inferno/layers/losses/Metrics.py
def metric_from_str(metric, **kwargs):
if metric == "cosine_similarity":
return cosine_sim_negative
elif metric in ["l1", "l1_loss", "mae"]:
return torch.nn.functional.l1_loss
elif metric in ["masked_l1", "masked_l1_loss", "masked_mae"]:
return MaskedMAELoss()
elif metric in ["temporal_masked_l1", "temporal_l1_loss", "temporal_mae"]:
return MaskedTemporalMAELoss()
elif metric in ["mse", "mse_loss", "l2", "l2_loss"]:
return torch.nn.functional.mse_loss
elif metric in ["masked_mse", "masked_mse_loss", "masked_l2", "masked_l2_loss"]:
return MaskedMSELoss()
elif metric in ["temporal_mse", "temporal_mse_loss", "temporal_l2", "temporal_l2_loss"]:
return MaskedTemporalMSELoss()
elif metric == "barlow_twins_headless":
return BarlowTwinsLossHeadless(**kwargs)
elif metric == "barlow_twins":
return BarlowTwinsLoss(**kwargs)
else:
raise ValueError(f"Invalid metric for deep feature loss: {metric}")
# Path: inferno/utils/other.py
def get_path_to_assets() -> Path:
import inferno
return Path(inferno.__file__).parents[1] / "assets"
# Path: inferno/layers/losses/Metrics.py
def get_metric(metric):
if isinstance(metric, str):
return metric_from_str(metric)
if isinstance(metric, (DictConfig, Munch)):
return metric_from_cfg(metric)
if isinstance(metric, dict):
return metric_from_cfg(Munch(metric))
raise ValueError(f"invalid type: '{type(metric)}'")
# Path: inferno/layers/losses/BarlowTwins.py
class BarlowTwinsLossHeadless(nn.Module):
def __init__(self, feature_size, batch_size=None, lambd=0.005, final_reduction='mean_on_diag'):
super().__init__()
# normalization layer for the representations z1 and z2
# the affine=False means there are no learnable weights in the BN layer
self.bn = nn.BatchNorm1d(feature_size, affine=False)
self.lambd = lambd
self.batch_size = batch_size
if final_reduction not in ["sum", "mean", "mean_on_diag", "mean_off_diag"]:
raise ValueError(f"Invalid reduction operation for Barlow Twins: '{self.final_reduction}'")
self.final_reduction = final_reduction
def forward(self, z1, z2, batch_size=None, ring_size=None):
assert not (batch_size is not None and self.batch_size is not None)
if ring_size is not None and ring_size > 1:
raise NotImplementedError("Barlow Twins with rings are not yet supported.")
if batch_size is None:
if self.batch_size is not None:
batch_size = self.batch_size
else:
print("[WARNING] Batch size for Barlow Twins loss not explicitly set. "
"This can make problems in multi-gpu training.")
batch_size = z1.shape[0]
# empirical cross-correlation matrix
c = self.bn(z1).T @ self.bn(z2)
c.div_(batch_size)
# sum the cross-correlation matrix between all gpus (if multi-gpu training)
if torch.distributed.is_initialized():
torch.distributed.nn.all_reduce(c)
on_diag = torch.diagonal(c).add_(-1).pow_(2)
off_diag = off_diagonal(c).pow_(2)
# implementation note:
# The original implementation uses 'sum' for final reduction (in fact they never did mean). However,
# if you're using additional losses apart from this one, the 'sum' reduction can significantly change
# the influence of your loss depending on how many elements the diagonal matrix has. In those cases,
# 'mean' should be more appropriate.
if self.final_reduction == 'sum':
# the original paper
on_diag = on_diag.sum()
off_diag = off_diag.sum()
elif self.final_reduction == 'mean':
# mean of the on diag and off diag elements
# there are many more off-diagonal elements, so the mean can add up to disproportionately less
# than what the original implementation intended
on_diag = on_diag.mean()
off_diag = off_diag.mean()
elif self.final_reduction == 'mean_on_diag':
# normalized by number of elements on diagonal
# off diag elements are normalized by number of on diag elements so the proportionality is preserved
n = on_diag.numel()
on_diag = on_diag.mean()
off_diag = off_diag.sum() / n
elif self.final_reduction == 'mean_off_diag':
# normalized by number of elements off diagonal
# on diag elements are normalized by number of off diag elements so the proportionality is preserved
n = off_diag.numel()
on_diag = on_diag.sum() / n
off_diag = off_diag.mean()
else:
raise ValueError(f"Invalid reduction operation for Barlow Twins: '{self.final_reduction}'")
loss = on_diag + self.lambd * off_diag
return loss
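# Illustrative usage sketch, not part of the repository dump above: the headless Barlow
# Twins loss on two random embedding batches; the feature and batch sizes are made up.
import torch
bt = BarlowTwinsLossHeadless(feature_size=128, batch_size=32, final_reduction='mean_on_diag')
z1, z2 = torch.randn(32, 128), torch.randn(32, 128)
loss = bt(z1, z2)  # scalar tensor combining on- and off-diagonal terms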
# Path: inferno/layers/losses/BarlowTwins.py
class BarlowTwinsLoss(nn.Module):
def __init__(self, feature_size=2048, layer_sizes=None, final_reduction='mean_on_diag'):
super().__init__()
if layer_sizes is None:
# layer_sizes = 3*[2048]
layer_sizes = 3*[8192]
# # projector
# if args.use_projector:
# sizes = [feature_size] + list(map(int, args.projector.split('-')))
sizes = [feature_size] + layer_sizes
layers = []
for i in range(len(sizes) - 2):
layers.append(nn.Linear(sizes[i], sizes[i + 1], bias=False))
layers.append(nn.BatchNorm1d(sizes[i + 1])) # here the BN layer of the projector is learnable
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(sizes[-2], sizes[-1], bias=False))
self.projector = nn.Sequential(*layers)
# else:
# self.projector = None
self.bt_loss_headless = BarlowTwinsLossHeadless(sizes[-1], final_reduction=final_reduction)
def forward(self, y1, y2, batch_size=None, ring_size=None):
if self.projector is not None:
z1 = self.projector(y1)
z2 = self.projector(y2)
else:
z1 = y1
z2 = y2
loss = self.bt_loss_headless(z1, z2, batch_size=batch_size, ring_size=ring_size)
return loss
# Path: inferno/layers/losses/Masked.py
class MaskedLoss(torch.nn.Module):
def __init__(self, func=F.mse_loss, reduction='mean', starting_dim_to_collapse=1):
super().__init__()
self.func = func
self.reduction = reduction
self.starting_dim_to_collapse = starting_dim_to_collapse
assert reduction in ['mean', 'sum', 'none']
def forward(self, input, target, mask=None):
# input: (batch_size, seq_len, ...)
# target: (batch_size, seq_len, ...)
# mask: (batch_size, seq_len)
assert input.shape == target.shape, f"input and target shapes must match, got {input.shape} and {target.shape}"
if mask is None:
return self.func(input, target, reduction=self.reduction)
assert mask.shape[0] == input.shape[0]
if self.starting_dim_to_collapse > 1: # if temporal dimension (B, T, ...), make sure mask has T dimensions
assert mask.shape[1] == input.shape[1]
else:
assert mask.ndim == 1 or (mask.ndim==2 and mask.shape[1] == 1) # for non temporal batching, the mask should be 1d (masking along the batch dimension only)
loss = self.func(input, target, reduction='none')
dims_to_collapse = list(range(self.starting_dim_to_collapse, len(input.shape)))
if len(dims_to_collapse) > 0:
if self.reduction == 'mean':
loss = loss.mean(dim=dims_to_collapse)
elif self.reduction == 'sum':
loss = loss.sum(dim=dims_to_collapse)
assert loss.shape == mask.shape, f"loss and mask shapes must match, got {loss.shape} and {mask.shape}"
loss = loss * mask
reduction_dim = self.starting_dim_to_collapse - 1
if self.reduction == 'mean':
mask_sum = mask.sum(dim=reduction_dim, keepdims=True)
# if (mask_sum == 0).all():
if (mask_sum == 0).any():
print("[WARNING] Skipping loss calculation because mask is all zeros")
return None
loss = loss.sum(dim=reduction_dim, keepdims=True) / mask_sum
loss_is_nan = loss.isnan()
if loss_is_nan.any():
loss = loss[~loss_is_nan]
elif self.reduction == 'sum':
loss = loss.sum(dim=reduction_dim, keepdims=True)
if self.reduction != 'none':
assert loss.isnan().any() == False
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
return loss
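# Illustrative usage sketch, not part of the repository dump above: MaskedLoss on a temporal
# batch where the mask marks valid frames; the (batch=2, time=4, feat=3) shapes are made up.
import torch
import torch.nn.functional as F
crit = MaskedLoss(func=F.l1_loss, reduction='mean', starting_dim_to_collapse=2)
pred, target = torch.rand(2, 4, 3), torch.rand(2, 4, 3)
mask = torch.ones(2, 4)  # 1 = valid frame, 0 = padded frame
loss = crit(pred, target, mask)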
# Path: inferno/layers/losses/VideoEmotionLoss.py
import copy
import omegaconf
import torch
import torch.nn.functional as F
import sys
from inferno.layers.losses.EmonetLoader import get_emonet
from pathlib import Path
from inferno.models.video_emorec.VideoEmotionClassifier import VideoEmotionClassifier
from inferno.models.IO import get_checkpoint_with_kwargs
from inferno.utils.other import class_from_str
from .EmoNetLoss import create_emo_loss
from inferno.layers.losses.emotion_loss_loader import emo_network_from_path
from omegaconf import OmegaConf
from .Metrics import metric_from_str
from inferno.utils.other import get_path_to_assets
from .Metrics import get_metric
from .BarlowTwins import BarlowTwinsLossHeadless, BarlowTwinsLoss
from .Masked import MaskedLoss
def load_video_emotion_recognition_net(network_path):
model_config_path = Path(network_path) / "cfg.yaml"
if not model_config_path.is_absolute():
model_config_path = get_path_to_assets() / model_config_path
# load config
model_config = OmegaConf.load(model_config_path)
class_ = class_from_str(model_config.model.pl_module_class, sys.modules[__name__])
# instantiate the model
checkpoint_mode = 'best' # resuming in the same stage, we want to pick up where we left off
checkpoint, checkpoint_kwargs = get_checkpoint_with_kwargs(
model_config, "",
checkpoint_mode=checkpoint_mode,
| pattern="val" |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: google-research/semivl
# Path: version.py
# Path: utils/gen_code_archive.py
def gen_code_archive(out_dir, file='code.tar.gz'):
archive = os.path.join(out_dir, file)
os.makedirs(os.path.dirname(archive), exist_ok=True)
with tarfile.open(archive, mode='w:gz') as tar:
tar.add('.', filter=is_source_file)
return archive
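# Illustrative usage sketch, not part of the repository dump above: snapshotting the current
# working tree; the output directory is a placeholder.
archive_path = gen_code_archive('work_dirs/example_run')  # -> work_dirs/example_run/code.tar.gz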
# Path: third_party/unimatch/dataset/semi.py
class SemiDataset(Dataset):
def __init__(self, cfg, mode, id_path=None, nsample=None):
self.name = cfg['dataset']
self.root = os.path.expandvars(os.path.expanduser(cfg['data_root']))
self.mode = mode
self.size = cfg['crop_size']
self.img_scale = cfg['img_scale']
self.scale_ratio_range = cfg.get('scale_ratio_range', (0.5, 2.0))
self.reduce_zero_label = cfg.get('reduce_zero_label', False)
if isinstance(self.img_scale, list):
self.img_scale = tuple(self.img_scale)
self.labeled_photometric_distortion = cfg['labeled_photometric_distortion']
if mode == 'train_l' or mode == 'train_u':
with open(id_path, 'r') as f:
self.ids = f.read().splitlines()
if mode == 'train_l' and nsample is not None:
self.ids *= math.ceil(nsample / len(self.ids))
self.ids = self.ids[:nsample]
else:
if id_path is None:
id_path = 'splits/%s/val.txt' % self.name
with open(id_path, 'r') as f:
self.ids = f.read().splitlines()
def __getitem__(self, item):
id = self.ids[item]
img = Image.open(os.path.join(self.root, id.split(' ')[0])).convert('RGB')
mask = Image.fromarray(np.array(Image.open(os.path.join(self.root, id.split(' ')[1]))))
if self.reduce_zero_label:
mask = np.array(mask)
mask[mask == 0] = 255
mask = mask - 1
mask[mask == 254] = 255
mask = Image.fromarray(mask)
if self.mode == 'val':
if self.img_scale is not None:
res = Resize(img_scale=self.img_scale, min_size=512)(dict(
img=np.array(img),
))
img = Image.fromarray(res['img'])
img, mask = normalize(img, mask)
return img, mask, id
if self.img_scale is not None:
# print('Size before', img.size)
res = Resize(img_scale=self.img_scale, ratio_range=self.scale_ratio_range)(dict(
img=np.array(img),
mask=np.array(mask),
seg_fields=['mask']
))
img = Image.fromarray(res['img'])
mask = Image.fromarray(res['mask'])
# print('Size after', mask.size)
else:
img, mask = resize(img, mask, self.scale_ratio_range)
ignore_value = 254 if self.mode == 'train_u' else 255
img, mask = crop(img, mask, self.size, ignore_value)
img, mask = hflip(img, mask, p=0.5)
if self.mode == 'train_l':
if self.labeled_photometric_distortion:
img = Image.fromarray(
PhotoMetricDistortion()({'img': np.array(img)[..., ::-1]})['img'][..., ::-1]
)
return normalize(img, mask)
img_w, img_s1, img_s2 = deepcopy(img), deepcopy(img), deepcopy(img)
if random.random() < 0.8:
img_s1 = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(img_s1)
img_s1 = transforms.RandomGrayscale(p=0.2)(img_s1)
img_s1 = blur(img_s1, p=0.5)
cutmix_box1 = obtain_cutmix_box(img_s1.size[0], p=0.5)
if random.random() < 0.8:
img_s2 = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(img_s2)
img_s2 = transforms.RandomGrayscale(p=0.2)(img_s2)
img_s2 = blur(img_s2, p=0.5)
cutmix_box2 = obtain_cutmix_box(img_s2.size[0], p=0.5)
ignore_mask = Image.fromarray(np.zeros((mask.size[1], mask.size[0])))
img_s1, ignore_mask = normalize(img_s1, ignore_mask)
img_s2 = normalize(img_s2)
mask = torch.from_numpy(np.array(mask)).long()
ignore_mask[mask == 254] = 255
return normalize(img_w), img_s1, img_s2, ignore_mask, cutmix_box1, cutmix_box2
def __len__(self):
return len(self.ids)
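# Illustrative usage sketch, not part of the repository dump above: a minimal cfg dict for
# SemiDataset; every key, value, and path here is an assumption, real configs come from YAML.
cfg = {'dataset': 'pascal', 'data_root': '/path/to/VOC2012', 'crop_size': 321,
'img_scale': None, 'labeled_photometric_distortion': False}
unlabeled_set = SemiDataset(cfg, 'train_u', id_path='splits/pascal/unlabeled.txt')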
# Path: model/builder.py
def build_model(cfg):
model_type = cfg['model']
if model_type == 'deeplabv3plus':
model = DeepLabV3Plus(cfg)
elif 'mmseg.' in model_type:
model_type = model_type.replace('mmseg.', '')
model_cfg_file = f'configs/_base_/models/{model_type}.py'
mmseg_cfg = Config.fromfile(model_cfg_file)
mmseg_cfg['model']['decode_head']['num_classes'] = cfg['nclass']
if 'zegclip' in model_type or 'vlm' in model_type:
if mmseg_cfg['img_size'] != cfg['crop_size']:
print('Modify model image_size to match crop_size', cfg['crop_size'])
nested_set(mmseg_cfg, 'img_size', cfg['crop_size'])
nested_set(mmseg_cfg, 'model.backbone.img_size', (cfg['crop_size'], cfg['crop_size']))
nested_set(mmseg_cfg, 'model.decode_head.img_size', cfg['crop_size'])
emb_dataset_prefix = {
'pascal': 'voc12_wbg',
'cityscapes': 'cityscapes',
'coco': 'coco',
'ade': 'ade',
}[cfg['dataset']]
text_embedding_variant = cfg['text_embedding_variant']
text_embedding = f'configs/_base_/datasets/text_embedding/{emb_dataset_prefix}_{text_embedding_variant}.npy'
nested_set(mmseg_cfg, 'model.load_text_embedding', text_embedding)
mcc_text_embedding_variant = cfg['mcc_text']
mcc_text_embedding = f'configs/_base_/datasets/text_embedding/{emb_dataset_prefix}_{mcc_text_embedding_variant}.npy'
nested_set(mmseg_cfg, 'model.load_mcc_text_embedding', mcc_text_embedding)
pl_text_embedding_variant = cfg['pl_text']
pl_text_embedding = f'configs/_base_/datasets/text_embedding/{emb_dataset_prefix}_{pl_text_embedding_variant}.npy'
nested_set(mmseg_cfg, 'model.load_pl_text_embedding', pl_text_embedding)
if mmseg_cfg['model']['decode_head']['type'] == 'ATMSingleHeadSeg':
mmseg_cfg['model']['decode_head']['seen_idx'] = list(range(cfg['nclass']))
mmseg_cfg['model']['decode_head']['all_idx'] = list(range(cfg['nclass']))
if mmseg_cfg['model']['decode_head'].get('loss_decode') is not None and \
mmseg_cfg['model']['decode_head']['loss_decode']['type'] == 'SegLossPlus':
mmseg_cfg['model']['decode_head']['loss_decode']['num_classes'] = cfg['nclass']
if cfg['clip_encoder'] is not None:
clip_encoder_cfg = Config.fromfile(f'configs/_base_/models/{cfg["clip_encoder"]}.py')
clip_encoder_cfg['img_size'] = mmseg_cfg['img_size']
if cfg.get('mcc_fix_resize_pos'):
clip_encoder_cfg['backbone']['img_size'] = mmseg_cfg['img_size']
mmseg_cfg['model']['clip_encoder'] = clip_encoder_cfg['backbone']
if 'model_args' in cfg:
mmseg_cfg['model'].update(cfg['model_args'])
model = build_segmentor(
mmseg_cfg.model,
train_cfg=mmseg_cfg.get('train_cfg'),
test_cfg=mmseg_cfg.get('test_cfg'))
model.disable_dropout = cfg['disable_dropout']
model.fp_rate = cfg['fp_rate']
model.forward = types.MethodType(forward_wrapper, model)
model.init_weights()
else:
raise ValueError(model_type)
return model
# Path: experiments.py
def get_git_revision() -> str:
try:
return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip()
except subprocess.CalledProcessError:
return ''
# Path: datasets/classes.py
CLASSES = {'pascal': ['background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
'car', 'cat', 'chair', 'cow', 'dining table', 'dog', 'horse', 'motorbike',
'person', 'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor'],
'cityscapes': ['road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light',
'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car',
'truck', 'bus', 'train', 'motorcycle', 'bicycle'],
'coco': ['void', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter',
'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',
'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog',
'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table',
'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush', 'banner', 'blanket', 'branch', 'bridge',
'building-other', 'bush', 'cabinet', 'cage', 'cardboard', 'carpet', 'ceiling-other',
'ceiling-tile', 'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain',
'desk-stuff', 'dirt', 'door-stuff', 'fence', 'floor-marble', 'floor-other', 'floor-stone',
'floor-tile', 'floor-wood', 'flower', 'fog', 'food-other', 'fruit', 'furniture-other',
'grass', 'gravel', 'ground-other', 'hill', 'house', 'leaves', 'light', 'mat', 'metal',
'mirror-stuff', 'moss', 'mountain', 'mud', 'napkin', 'net', 'paper', 'pavement', 'pillow',
'plant-other', 'plastic', 'platform', 'playingfield', 'railing', 'railroad', 'river',
'road', 'rock', 'roof', 'rug', 'salad', 'sand', 'sea', 'shelf', 'sky-other', 'skyscraper',
'snow', 'solid-other', 'stairs', 'stone', 'straw', 'structural-other', 'table', 'tent',
'textile-other', 'towel', 'tree', 'vegetable', 'wall-brick', 'wall-concrete', 'wall-other',
'wall-panel', 'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'waterdrops',
'window-blind', 'window-other', 'wood'],
'ade': ['wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', 'windowpane', 'grass', 'cabinet',
'sidewalk', 'person', 'earth', 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', 'water',
'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', 'field', 'armchair', 'seat', 'fence', 'desk',
'rock', 'wardrobe', 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', 'signboard',
'chest of drawers','counter', 'sand', 'sink', 'skyscraper', 'fireplace', 'refrigerator', 'grandstand',
'path', 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', 'stairway', 'river', 'bridge',
'bookcase', 'blind', 'coffee table', 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',
'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', 'arcade machine', 'hovel', 'bus',
'towel', 'light', 'truck', 'tower', 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',
'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', 'escalator', 'ottoman', 'bottle',
'buffet', 'poster', 'stage', 'van', 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',
'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', 'bag', 'minibike', 'cradle', 'oven',
'ball', 'food', 'step', 'tank', 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', 'dishwasher',
'screen', 'blanket', 'sculpture', 'hood', 'sconce', 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier',
'crt screen', 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', 'clock', 'flag'],
}
# Path: third_party/unimatch/util/ohem.py
class ProbOhemCrossEntropy2d(nn.Module):
def __init__(self, ignore_index, reduction='mean', thresh=0.7, min_kept=256,
down_ratio=1, use_weight=False):
super(ProbOhemCrossEntropy2d, self).__init__()
self.ignore_index = ignore_index
self.thresh = float(thresh)
self.min_kept = int(min_kept)
self.down_ratio = down_ratio
if use_weight:
weight = torch.FloatTensor(
[0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489,
0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955,
1.0865, 1.1529, 1.0507])
self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction,
weight=weight,
ignore_index=ignore_index)
else:
self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction,
ignore_index=ignore_index)
def forward(self, pred, target):
b, c, h, w = pred.size()
target = target.view(-1)
valid_mask = target.ne(self.ignore_index)
target = target * valid_mask.long()
num_valid = valid_mask.sum()
prob = F.softmax(pred, dim=1)
prob = (prob.transpose(0, 1)).reshape(c, -1)
if self.min_kept > num_valid:
pass
elif num_valid > 0:
prob = prob.masked_fill_(~valid_mask, 1)
mask_prob = prob[
target, torch.arange(len(target), dtype=torch.long)]
threshold = self.thresh
if self.min_kept > 0:
index = mask_prob.argsort()
threshold_index = index[min(len(index), self.min_kept) - 1]
if mask_prob[threshold_index] > self.thresh:
threshold = mask_prob[threshold_index]
kept_mask = mask_prob.le(threshold)
target = target * kept_mask.long()
valid_mask = valid_mask * kept_mask
target = target.masked_fill_(~valid_mask, self.ignore_index)
target = target.view(b, h, w)
return self.criterion(pred, target)
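# Illustrative usage sketch, not part of the repository dump above: the OHEM criterion on
# dummy logits; the class count and spatial size are made up.
import torch
ohem = ProbOhemCrossEntropy2d(ignore_index=255, thresh=0.7, min_kept=256)
logits = torch.randn(2, 19, 64, 64)         # (B, C, H, W)
labels = torch.randint(0, 19, (2, 64, 64))  # (B, H, W), 255 would mean ignore
loss = ohem(logits, labels)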
# Path: third_party/unimatch/util/utils.py
def count_params(model):
param_num = sum(p.numel() for p in model.parameters())
return param_num / 1e6
# Path: third_party/unimatch/util/utils.py
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, length=0):
self.length = length
self.reset()
def reset(self):
if self.length > 0:
self.history = []
else:
self.count = 0
self.sum = 0.0
self.val = 0.0
self.avg = 0.0
def update(self, val, num=1):
if self.length > 0:
# currently assert num==1 to avoid bad usage, refine when there are some explicit requirements
assert num == 1
self.history.append(val)
if len(self.history) > self.length:
del self.history[0]
self.val = self.history[-1]
self.avg = np.mean(self.history)
else:
self.val = val
self.sum += val * num
self.count += num
self.avg = self.sum / self.count
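# Illustrative usage sketch, not part of the repository dump above: AverageMeter as a plain
# running average (length=0).
meter = AverageMeter()
meter.update(0.5)
meter.update(0.7)
meter.update(0.6)
print(meter.avg)  # 0.6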
# Path: third_party/unimatch/util/utils.py
def intersectionAndUnion(output, target, K, ignore_index=255):
# 'K' classes, output and target sizes are N or N * L or N * H * W, each value in range 0 to K - 1.
assert output.ndim in [1, 2, 3]
assert output.shape == target.shape, f'{output.shape} != {target.shape}'
output = output.reshape(output.size).copy()
target = target.reshape(target.size)
output[np.where(target == ignore_index)[0]] = ignore_index
intersection = output[np.where(output == target)[0]]
area_intersection, _ = np.histogram(intersection, bins=np.arange(K + 1))
area_output, _ = np.histogram(output, bins=np.arange(K + 1))
area_target, _ = np.histogram(target, bins=np.arange(K + 1))
area_union = area_output + area_target - area_intersection
return area_intersection, area_union, area_target
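# Illustrative usage sketch, not part of the repository dump above: turning the returned
# histograms into per-class IoU; the 21-class setup is an assumption.
import numpy as np
pred = np.random.randint(0, 21, (2, 64, 64))
gt = np.random.randint(0, 21, (2, 64, 64))
inter, union, _ = intersectionAndUnion(pred, gt, K=21, ignore_index=255)
iou_per_class = inter / (union + 1e-10)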
# Path: third_party/unimatch/util/utils.py
def init_log(name, level=logging.INFO):
if (name, level) in logs:
return
logs.add((name, level))
logger = logging.getLogger(name)
logger.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
if "SLURM_PROCID" in os.environ:
rank = int(os.environ["SLURM_PROCID"])
logger.addFilter(lambda record: rank == 0)
else:
rank = 0
format_str = "[%(asctime)s][%(levelname)8s] %(message)s"
formatter = logging.Formatter(format_str)
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
# Path: third_party/unimatch/util/dist_helper.py
def setup_distributed(backend="nccl", port=None):
"""AdaHessian Optimizer
Lifted from https://github.com/BIGBALLON/distribuuuu/blob/master/distribuuuu/utils.py
Originally licensed MIT, Copyright (c) 2020 Wei Li
"""
num_gpus = torch.cuda.device_count()
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
torch.cuda.set_device(rank % num_gpus)
dist.init_process_group(
backend=backend,
world_size=world_size,
rank=rank,
)
return rank, world_size
# Path: third_party/unimatch/supervised.py
import argparse
import logging
import os
import pprint
import shutil
import uuid
import torch
import numpy as np
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import yaml
import mmseg
from version import __version__
from datetime import datetime
from utils.gen_code_archive import gen_code_archive
from tqdm import tqdm
from torch import nn
from torch.nn import functional as F
from torch.optim import SGD
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from third_party.unimatch.dataset.semi import SemiDataset
from model.builder import build_model
from mmseg.core import build_optimizer
from experiments import get_git_revision
from datasets.classes import CLASSES
from third_party.unimatch.util.ohem import ProbOhemCrossEntropy2d
from third_party.unimatch.util.utils import count_params, AverageMeter, intersectionAndUnion, init_log
from third_party.unimatch.util.dist_helper import setup_distributed
parser = argparse.ArgumentParser(description='Revisiting Weak-to-Strong Consistency in Semi-Supervised Semantic Segmentation')
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--local_rank', default=0, type=int)
parser.add_argument('--port', default=None, type=int)
def predict(model, img, mask, mode, cfg, return_logits=False):
if mode == 'padded_sliding_window':
grid = cfg['crop_size']
stride = cfg['stride']
if stride < 1:
stride = int(grid * stride)
b, _, h, w = img.shape
final = torch.zeros(b, cfg['nclass'], h, w).cuda()
row = 0
while row < h:
col = 0
| while col < w: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: softwaredoug/searcharray
# Path: searcharray/postings.py
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array, tokenizer=ws_tokenizer) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
if not all(isinstance(x, str) or pd.isna(x) for x in array):
raise TypeError("Expected a list of strings to tokenize")
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer)
postings = cls([], tokenizer=tokenizer)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Cant set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also, this is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# its an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=self.term_mat.rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
posns = self.posns.positions(term_id, key=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
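# Illustrative usage sketch, not part of the repository dump above: indexing a text column
# with SearchArray and running term and phrase queries; the toy corpus is made up.
import pandas as pd
df = pd.DataFrame({'text': ['the quick brown fox', 'the lazy dog sleeps']})
df['text_indexed'] = SearchArray.index(df['text'])
bm25 = df['text_indexed'].array.score('fox')                   # BM25 score per document
has_phrase = df['text_indexed'].array.match(['lazy', 'dog'])   # boolean phrase match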
# Path: searcharray/postings.py
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two vectors of the same large dimensional vector sparse
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
# Path: searcharray/postings.py
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
# Path: test/test_extension_array.py
from pandas.tests.extension import base
from searcharray import SearchArray, Terms, TermsDtype
import pandas as pd
import pytest
@pytest.fixture
def dtype():
return TermsDtype()
| @pytest.fixture |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: intellerce/controlanimate
# Path: animatediff/models/attention.py
class Transformer3DModel(ModelMixin, ConfigMixin):
@register_to_config
def __init__(
self,
num_attention_heads: int = 16,
attention_head_dim: int = 88,
in_channels: Optional[int] = None,
num_layers: int = 1,
dropout: float = 0.0,
norm_num_groups: int = 32,
cross_attention_dim: Optional[int] = None,
attention_bias: bool = False,
activation_fn: str = "geglu",
num_embeds_ada_norm: Optional[int] = None,
use_linear_projection: bool = False,
only_cross_attention: bool = False,
upcast_attention: bool = False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
):
super().__init__()
self.use_linear_projection = use_linear_projection
self.num_attention_heads = num_attention_heads
self.attention_head_dim = attention_head_dim
inner_dim = num_attention_heads * attention_head_dim
conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv
linear_cls = nn.Linear if USE_PEFT_BACKEND else LoRACompatibleLinear
# Define input layers
self.in_channels = in_channels
self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
if use_linear_projection:
self.proj_in = linear_cls(in_channels, inner_dim)
else:
self.proj_in = conv_cls(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
# Define transformer blocks
self.transformer_blocks = nn.ModuleList(
[
BasicTransformerBlock(
inner_dim,
num_attention_heads,
attention_head_dim,
dropout=dropout,
cross_attention_dim=cross_attention_dim,
activation_fn=activation_fn,
num_embeds_ada_norm=num_embeds_ada_norm,
attention_bias=attention_bias,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
for d in range(num_layers)
]
)
# 4. Define output layers
if use_linear_projection:
self.proj_out = linear_cls(in_channels, inner_dim)
else:
self.proj_out = conv_cls(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, return_dict: bool = True):
# Input
assert hidden_states.dim() == 5, f"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}."
video_length = hidden_states.shape[2]
hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w")
encoder_hidden_states = repeat(encoder_hidden_states, 'b n c -> (b f) n c', f=video_length)
batch, channel, height, weight = hidden_states.shape
residual = hidden_states
hidden_states = self.norm(hidden_states)
if not self.use_linear_projection:
hidden_states = self.proj_in(hidden_states)
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)
else:
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)
hidden_states = self.proj_in(hidden_states)
# Blocks
for block in self.transformer_blocks:
hidden_states = block(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
timestep=timestep,
video_length=video_length
)
# Output
if not self.use_linear_projection:
hidden_states = (
hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()
)
hidden_states = self.proj_out(hidden_states)
else:
hidden_states = self.proj_out(hidden_states)
hidden_states = (
hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()
)
output = hidden_states + residual
output = rearrange(output, "(b f) c h w -> b c f h w", f=video_length)
if not return_dict:
return (output,)
return Transformer3DModelOutput(sample=output)
# Path: animatediff/models/resnet.py
class Downsample3D(nn.Module):
def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.padding = padding
stride = 2
self.name = name
if use_conv:
self.conv = InflatedConv3d(self.channels, self.out_channels, 3, stride=stride, padding=padding)
else:
raise NotImplementedError
def forward(self, hidden_states):
assert hidden_states.shape[1] == self.channels
if self.use_conv and self.padding == 0:
raise NotImplementedError
assert hidden_states.shape[1] == self.channels
hidden_states = self.conv(hidden_states)
return hidden_states
# Path: animatediff/models/resnet.py
class ResnetBlock3D(nn.Module):
def __init__(
self,
*,
in_channels,
out_channels=None,
conv_shortcut=False,
dropout=0.0,
temb_channels=512,
groups=32,
groups_out=None,
pre_norm=True,
eps=1e-6,
non_linearity="swish",
time_embedding_norm="default",
output_scale_factor=1.0,
use_in_shortcut=None,
use_inflated_groupnorm=None,
):
super().__init__()
self.pre_norm = pre_norm
self.pre_norm = True
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.use_conv_shortcut = conv_shortcut
self.time_embedding_norm = time_embedding_norm
self.output_scale_factor = output_scale_factor
linear_cls = LoRACompatibleLinear
conv_cls = LoRACompatibleConv
if groups_out is None:
groups_out = groups
assert use_inflated_groupnorm != None
if use_inflated_groupnorm:
self.norm1 = InflatedGroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
else:
self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
self.conv1 = InflatedConv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
if temb_channels is not None:
if self.time_embedding_norm == "default":
time_emb_proj_out_channels = out_channels
elif self.time_embedding_norm == "scale_shift":
time_emb_proj_out_channels = out_channels * 2
else:
raise ValueError(f"unknown time_embedding_norm : {self.time_embedding_norm} ")
self.time_emb_proj = linear_cls(temb_channels, time_emb_proj_out_channels)
else:
self.time_emb_proj = None
if use_inflated_groupnorm:
self.norm2 = InflatedGroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)
else:
self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)
self.dropout = torch.nn.Dropout(dropout)
self.conv2 = InflatedConv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
if non_linearity == "swish":
self.nonlinearity = lambda x: F.silu(x)
elif non_linearity == "mish":
self.nonlinearity = Mish()
elif non_linearity == "silu":
self.nonlinearity = nn.SiLU()
self.use_in_shortcut = self.in_channels != self.out_channels if use_in_shortcut is None else use_in_shortcut
self.conv_shortcut = None
if self.use_in_shortcut:
self.conv_shortcut = InflatedConv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, input_tensor, temb):
hidden_states = input_tensor
hidden_states = self.norm1(hidden_states)
hidden_states = self.nonlinearity(hidden_states)
hidden_states = self.conv1(hidden_states)
if temb is not None:
temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None, None]
if temb is not None and self.time_embedding_norm == "default":
hidden_states = hidden_states + temb
hidden_states = self.norm2(hidden_states)
if temb is not None and self.time_embedding_norm == "scale_shift":
scale, shift = torch.chunk(temb, 2, dim=1)
hidden_states = hidden_states * (1 + scale) + shift
hidden_states = self.nonlinearity(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.conv2(hidden_states)
if self.conv_shortcut is not None:
input_tensor = self.conv_shortcut(input_tensor)
output_tensor = (input_tensor + hidden_states) / self.output_scale_factor
return output_tensor
# Path: animatediff/models/resnet.py
class Upsample3D(nn.Module):
def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_conv_transpose = use_conv_transpose
self.name = name
conv = None
if use_conv_transpose:
raise NotImplementedError
elif use_conv:
self.conv = InflatedConv3d(self.channels, self.out_channels, 3, padding=1)
def forward(self, hidden_states, output_size=None):
assert hidden_states.shape[1] == self.channels
if self.use_conv_transpose:
raise NotImplementedError
# Cast to float32 as the 'upsample_nearest2d_out_frame' op does not support bfloat16
dtype = hidden_states.dtype
if dtype == torch.bfloat16:
hidden_states = hidden_states.to(torch.float32)
# upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
if hidden_states.shape[0] >= 64:
hidden_states = hidden_states.contiguous()
# if `output_size` is passed we force the interpolation output
# size and do not make use of `scale_factor=2`
if output_size is None:
hidden_states = F.interpolate(hidden_states, scale_factor=[1.0, 2.0, 2.0], mode="nearest")
else:
hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest")
# If the input is bfloat16, we cast back to bfloat16
if dtype == torch.bfloat16:
hidden_states = hidden_states.to(dtype)
# if self.use_conv:
# if self.name == "conv":
# hidden_states = self.conv(hidden_states)
# else:
# hidden_states = self.Conv2d_0(hidden_states)
hidden_states = self.conv(hidden_states)
return hidden_states
# Path: animatediff/models/motion_module.py
def get_motion_module(
in_channels,
motion_module_type: str,
motion_module_kwargs: dict
):
if motion_module_type == "Vanilla":
return VanillaTemporalModule(in_channels=in_channels, **motion_module_kwargs,)
else:
raise ValueError
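# Illustrative usage sketch, not part of the repository dump above: the factory only accepts
# the "Vanilla" type; the empty kwargs dict is a placeholder, real configs pass more keys.
motion_module = get_motion_module(in_channels=320, motion_module_type="Vanilla", motion_module_kwargs={})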
# Path: animatediff/models/unet_blocks.py
import torch
import pdb
from torch import nn
from .attention import Transformer3DModel
from .resnet import Downsample3D, ResnetBlock3D, Upsample3D
from .motion_module import get_motion_module
from diffusers.models.lora import LoRACompatibleConv, LoRACompatibleLinear
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
downsample_padding=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
if down_block_type == "DownBlock3D":
return DownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
resnet_time_scale_shift=resnet_time_scale_shift,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
elif down_block_type == "CrossAttnDownBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D")
return CrossAttnDownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
raise ValueError(f"{down_block_type} does not exist.")
def get_up_block(
up_block_type,
num_layers,
in_channels,
out_channels,
prev_output_channel,
temb_channels,
add_upsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
if up_block_type == "UpBlock3D":
return UpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
resnet_time_scale_shift=resnet_time_scale_shift,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
elif up_block_type == "CrossAttnUpBlock3D":
if cross_attention_dim is None:
| raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Zaczero/openstreetmap-ng
# Path: src/config.py
APP_URL = os.environ['APP_URL'].rstrip('/')
# Path: src/config.py
DEFAULT_LANGUAGE = 'en'
# Path: src/lib/avatar.py
class Avatar:
@staticmethod
def get_url(avatar_type: AvatarType, avatar_id: str | int) -> str:
"""
Get the url of the avatar image.
"""
if avatar_type == AvatarType.default:
return '/static/img/avatar.webp'
elif avatar_type == AvatarType.gravatar:
return f'/api/web/avatar/gravatar/{avatar_id}'
elif avatar_type == AvatarType.custom:
return f'/api/web/avatar/custom/{avatar_id}'
else:
raise NotImplementedError(f'Unsupported avatar type {avatar_type!r}')
@staticmethod
async def get_default_image() -> bytes:
"""
Get the default avatar image.
"""
return await Path('static/img/avatar.webp').read_bytes()
@staticmethod
def normalize_image(data: bytes) -> bytes:
"""
Normalize the avatar image.
- Orientation: rotate
- Shape ratio: crop
- Megapixels: downscale
- File size: reduce quality
"""
img = Image.open(BytesIO(data))
# normalize orientation
ImageOps.exif_transpose(img, in_place=True)
# normalize shape ratio
ratio = img.width / img.height
if ratio > AVATAR_MAX_RATIO:
width = int(img.height * AVATAR_MAX_RATIO)
img = img.crop(((img.width - width) // 2, 0, (img.width + width) // 2, img.height))
elif ratio < 1 / AVATAR_MAX_RATIO:
height = int(img.width / AVATAR_MAX_RATIO)
img = img.crop((0, (img.height - height) // 2, img.width, (img.height + height) // 2))
# normalize megapixels
mp_ratio = (img.width * img.height) / AVATAR_MAX_MEGAPIXELS
if mp_ratio > 1:
img.thumbnail((img.width // mp_ratio, img.height // mp_ratio))
# normalize file size
with BytesIO() as buffer:
for quality in (95, 90, 80, 70, 60, 50):
buffer.seek(0)
buffer.truncate()
img.save(buffer, format='WEBP', quality=quality)
if buffer.tell() <= AVATAR_MAX_FILE_SIZE:
return buffer.getvalue()
raise_for().avatar_too_big()
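# Standalone sketch, not from the repository (assumes Pillow with WEBP support): the same
# quality-reduction idea as above, re-encoding at decreasing quality until the payload fits a byte budget.
from io import BytesIO
from PIL import Image

img = Image.new("RGB", (256, 256), color=(180, 90, 30))
max_file_size = 50_000
with BytesIO() as buffer:
    for quality in (95, 90, 80, 70, 60, 50):
        buffer.seek(0)
        buffer.truncate()
        img.save(buffer, format="WEBP", quality=quality)
        if buffer.tell() <= max_file_size:
            payload = buffer.getvalue()
            break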
# Path: src/lib/crypto.py
HASH_SIZE = 32
# Path: src/lib/languages.py
def get_language_info(normalized_code: str) -> LanguageInfo | None:
"""
Get `LanguageInfo` by normalized code.
"""
return _languages.get(normalized_code, None)
# Path: src/lib/languages.py
def normalize_language_case(code: str) -> str:
"""
Normalize language code case.
>>> normalize_language_case('EN')
'en'
>>> normalize_language_case('NonExistent')
'NonExistent'
"""
if code in _languages:
return code
return _languages_lower_map.get(code.casefold(), code)
# Path: src/lib/password_hash.py
class PasswordHash:
rehash_needed: bool | None = None
def __init__(self, hasher: PasswordHasher):
self._hasher = hasher
def verify(self, password_hashed: str, salt: str | None, password: str) -> bool:
"""
Verify a password against a hash and optional salt.
Returns `True` if the password matches, `False` otherwise.
If the password matches but the hash needs to be rehashed, `rehash_needed` will be set to `True`.
"""
if self.rehash_needed is not None:
raise RuntimeError(f'{self.verify.__qualname__} was reused')
# argon2
if password_hashed.startswith('$argon2'):
if salt is not None:
logging.warning('Unexpected salt for Argon2 hash')
if self._hasher.verify(password_hashed, password):
self.rehash_needed = self._hasher.check_needs_rehash(password_hashed)
return True
else:
self.rehash_needed = False
return False
# rehash deprecated methods
self.rehash_needed = True
# md5 (deprecated)
if len(password_hashed) == 32:
valid_hash = md5(((salt or '') + password).encode()).hexdigest() # noqa: S324
return compare_digest(password_hashed, valid_hash)
# pbkdf2 (deprecated)
if salt and '!' in salt:
password_hashed_b = base64.b64decode(password_hashed)
algorithm, iterations_, salt = salt.split('!')
iterations = int(iterations_)
valid_hash = pbkdf2_hmac(algorithm, password.encode(), salt.encode(), iterations, len(password_hashed_b))
return compare_digest(password_hashed_b, valid_hash)
hash_len = len(password_hashed)
salt_len = len(salt or '')
raise ValueError(f'Unknown password hash format: {hash_len=}, {salt_len=}')
def hash(self, password: str) -> str: # noqa: A003
"""
Hash a password using latest recommended algorithm.
"""
return self._hasher.hash(password)
@classmethod
def default(cls) -> Self:
"""
Get a default password hasher.
"""
return cls(UserRole.get_password_hasher(()))
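# Usage sketch, not part of the repository (assumes the argon2-cffi package): hash with Argon2,
# verify, and check whether the parameters call for a rehash, mirroring the verify/rehash_needed flow above.
from argon2 import PasswordHasher

hasher = PasswordHasher()
stored = hasher.hash("correct horse battery staple")
hasher.verify(stored, "correct horse battery staple")  # raises on mismatch
needs_rehash = hasher.check_needs_rehash(stored)       # False for freshly hashed values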
# Path: src/lib/rich_text.py
class RichTextMixin:
__rich_text_fields__: Sequence[tuple[str, TextFormat]] = ()
async def resolve_rich_text(self) -> None:
"""
Resolve rich text fields.
"""
async def resolve_task(field_name: str, text_format: TextFormat) -> None:
rich_field_name = field_name + '_rich'
# skip if already resolved
if getattr(self, rich_field_name) is not None:
return
rich_hash_field_name = field_name + '_rich_hash'
text: str = getattr(self, field_name)
text_rich_hash: bytes | None = getattr(self, rich_hash_field_name)
cache = await RichText.get_cache(text, text_rich_hash, text_format)
# assign new hash if changed
if text_rich_hash != cache.id:
async with DB() as session:
cls = type(self)
stmt = (
update(cls)
.where(cls.id == self.id, getattr(cls, rich_hash_field_name) == text_rich_hash)
.values({rich_hash_field_name: cache.id})
)
await session.execute(stmt)
setattr(self, rich_hash_field_name, cache.id)
# assign value to instance
setattr(self, rich_field_name, cache)
# small optimization, don't create task group if at most one field
if len(self.__rich_text_fields__) <= 1:
for field_name, text_format in self.__rich_text_fields__:
await resolve_task(field_name, text_format)
else:
async with anyio.create_task_group() as tg:
for field_name, text_format in self.__rich_text_fields__:
tg.start_soon(resolve_task, field_name, text_format)
# Path: src/lib/storage/base.py
STORAGE_KEY_MAX_LENGTH = 64
# Path: src/lib_cython/geo_utils.py
def haversine_distance(p1: Point, p2: Point) -> cython.double:
"""
Calculate the distance between two points on the Earth's surface using the Haversine formula.
Returns the distance in meters.
"""
lon1: cython.double = p1.x
lat1: cython.double = p1.y
lon2: cython.double = p2.x
lat2: cython.double = p2.y
dlon = radians(lon2 - lon1)
dlat = radians(lat2 - lat1)
a = sin(dlat / 2) ** 2 + cos(radians(lat1)) * cos(radians(lat2)) * sin(dlon / 2) ** 2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
return c * 6371000 # R
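# Plain-Python sketch, not part of the repository, of the same Haversine formula
# (no Cython/shapely): great-circle distance in meters between two lon/lat points.
from math import radians, sin, cos, atan2, sqrt

def haversine_m(lon1, lat1, lon2, lat2):
    dlon = radians(lon2 - lon1)
    dlat = radians(lat2 - lat1)
    a = sin(dlat / 2) ** 2 + cos(radians(lat1)) * cos(radians(lat2)) * sin(dlon / 2) ** 2
    return 2 * atan2(sqrt(a), sqrt(1 - a)) * 6371000

print(round(haversine_m(0.0, 51.5, 2.35, 48.85)))  # roughly the London-Paris distance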
# Path: src/limits.py
LANGUAGE_CODE_MAX_LENGTH = 10
# Path: src/limits.py
USER_DESCRIPTION_MAX_LENGTH = 100_000 # NOTE: value TBD
# Path: src/limits.py
USER_LANGUAGES_LIMIT = 10
# Path: src/models/auth_provider.py
class AuthProvider(BaseEnum):
openid = 'openid'
google = 'google'
facebook = 'facebook'
microsoft = 'microsoft'
github = 'github'
wikipedia = 'wikipedia'
# Path: src/models/avatar_type.py
class AvatarType(BaseEnum):
default = 'default'
gravatar = 'gravatar'
custom = 'custom'
# Path: src/models/db/base.py
class Base:
class NoID(MappedAsDataclass, DeclarativeBase, kw_only=True):
pass
class Sequential(NoID):
__abstract__ = True
id: Mapped[int] = mapped_column(BigInteger, nullable=False, primary_key=True)
class UUID(NoID):
__abstract__ = True
# TODO: sortable like timeflake or ulid if needed?
id: Mapped[UUID] = mapped_column(Uuid, nullable=False, primary_key=True, default_factory=uuid4)
class Validating(BaseModel, ABC):
# use_enum_values=True is unpredictable
# see https://github.com/pydantic/pydantic/issues/6565
model_config = ConfigDict(
allow_inf_nan=False,
arbitrary_types_allowed=True,
from_attributes=True,
validate_assignment=True,
validate_default=True,
) # TODO: True only dev/test
@field_validator('*')
@classmethod
def str_validator(cls, v):
if isinstance(v, str) and v:
# check for invalid XML 1.0 characters
if _BAD_XML_RE.search(v):
raise ValueError(f'Invalid XML 1.0 characters {v!r}')
# normalize unicode to NFC form
return unicode_normalize(v)
return v
@classmethod
def from_orm(cls, orm, *, validate: bool = True) -> Self:
if validate:
return cls.model_validate(orm)
else:
return cls.model_construct(orm)
def to_orm_dict(self) -> dict:
return super().model_dump(by_alias=True)
# Path: src/models/db/cache_entry.py
class CacheEntry(Base.NoID):
__tablename__ = 'cache'
id: Mapped[bytes] = mapped_column(LargeBinary(HASH_SIZE), nullable=False, primary_key=True)
expires_at: Mapped[datetime] = mapped_column(DateTime, nullable=False) # TODO: pruner
value: Mapped[str] = mapped_column(UnicodeText, nullable=False)
# Path: src/models/db/created_at_mixin.py
class CreatedAtMixin:
created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, default=func.now())
# Path: src/models/editor.py
class Editor(BaseEnum):
id = 'id'
remote = 'remote'
# Path: src/models/geometry_type.py
class PointType(TypeDecorator):
impl = Geometry(geometry_type='POINT', srid=SRID, spatial_index=False)
cache_ok = True
def process_bind_param(self, value: Point | None, _: Dialect) -> WKBElement | None:
if value is None:
return None
return from_shape(value, srid=SRID)
def process_result_value(self, value: WKBElement | None, _: Dialect) -> Point | None:
if value is None:
return None
return to_shape(value)
# Path: src/models/language_info.py
class LanguageInfo(NamedTuple):
code: str
english_name: str
native_name: str
@property
def display_name(self) -> str:
return f'{self.english_name} ({self.native_name})'
# Path: src/models/scope.py
class ExtendedScope(BaseEnum):
"""
Extended scopes with entries that are not obtainable by normal means.
"""
read_prefs = 'read_prefs'
write_prefs = 'write_prefs'
write_diary = 'write_diary'
write_api = 'write_api'
read_gpx = 'read_gpx'
write_gpx = 'write_gpx'
write_notes = 'write_notes'
# additional scopes
read_email = 'read_email'
skip_authorization = 'skip_authorization'
web_user = 'web_user'
# role-specific scopes
role_moderator = 'role_moderator'
role_administrator = 'role_administrator'
# Path: src/models/text_format.py
class TextFormat(BaseEnum):
html = 'html'
markdown = 'markdown'
plain = 'plain'
# Path: src/models/user_role.py
class UserRole(BaseEnum):
moderator = 'moderator'
administrator = 'administrator'
@staticmethod
def get_changeset_max_size(roles: Sequence[Self]) -> int:
"""
Get the maximum size of a changeset for the given roles.
"""
if not roles:
roles = [None]
return max(_changeset_max_size[r] for r in roles)
@staticmethod
def get_password_hasher(roles: Sequence[Self]) -> PasswordHasher:
"""
Get the password hasher for the given roles.
"""
if not roles:
roles = [None]
return max((_password_hasher[r] for r in roles), key=itemgetter(0))[1]
# Path: src/models/user_status.py
class UserStatus(BaseEnum):
pending = 'pending'
active = 'active'
# Path: src/models/db/user.py
from collections.abc import Sequence
from datetime import datetime
from ipaddress import IPv4Address, IPv6Address
from typing import TYPE_CHECKING
from email_validator.rfc_constants import EMAIL_MAX_LENGTH
from shapely.geometry import Point
from sqlalchemy import (
ARRAY,
Boolean,
DateTime,
Enum,
LargeBinary,
SmallInteger,
Unicode,
UnicodeText,
UniqueConstraint,
func,
)
from sqlalchemy.dialects.postgresql import INET
from sqlalchemy.orm import Mapped, mapped_column, relationship, validates
from src.config import APP_URL, DEFAULT_LANGUAGE
from src.lib.avatar import Avatar
from src.lib.crypto import HASH_SIZE
from src.lib.languages import get_language_info, normalize_language_case
from src.lib.password_hash import PasswordHash
from src.lib.rich_text import RichTextMixin
from src.lib.storage.base import STORAGE_KEY_MAX_LENGTH
from src.lib_cython.geo_utils import haversine_distance
from src.limits import (
LANGUAGE_CODE_MAX_LENGTH,
USER_DESCRIPTION_MAX_LENGTH,
USER_LANGUAGES_LIMIT,
)
from src.models.auth_provider import AuthProvider
from src.models.avatar_type import AvatarType
from src.models.db.base import Base
from src.models.db.cache_entry import CacheEntry
from src.models.db.created_at_mixin import CreatedAtMixin
from src.models.editor import Editor
from src.models.geometry_type import PointType
from src.models.language_info import LanguageInfo
from src.models.scope import ExtendedScope
from src.models.text_format import TextFormat
from src.models.user_role import UserRole
from src.models.user_status import UserStatus
from src.models.db.oauth1_application import OAuth1Application
from src.models.db.oauth2_application import OAuth2Application
from src.models.db.user_block import UserBlock
class User(Base.Sequential, CreatedAtMixin, RichTextMixin):
__tablename__ = 'user'
__rich_text_fields__ = (('description', TextFormat.markdown),)
email: Mapped[str] = mapped_column(Unicode(EMAIL_MAX_LENGTH), nullable=False)
display_name: Mapped[str] = mapped_column(Unicode, nullable=False)
password_hashed: Mapped[str] = mapped_column(Unicode, nullable=False)
created_ip: Mapped[IPv4Address | IPv6Address] = mapped_column(INET, nullable=False)
status: Mapped[UserStatus] = mapped_column(Enum(UserStatus), nullable=False)
auth_provider: Mapped[AuthProvider | None] = mapped_column(Enum(AuthProvider), nullable=True)
auth_uid: Mapped[str | None] = mapped_column(Unicode, nullable=True)
languages: Mapped[list[str]] = mapped_column(ARRAY(Unicode(LANGUAGE_CODE_MAX_LENGTH)), nullable=False)
# defaults
password_changed_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True, default=func.now())
password_salt: Mapped[str | None] = mapped_column(Unicode, nullable=True, default=None)
consider_public_domain: Mapped[bool] = mapped_column(Boolean, nullable=False)
roles: Mapped[list[UserRole]] = mapped_column(ARRAY(Enum(UserRole)), nullable=False, default=())
description: Mapped[str] = mapped_column(UnicodeText, nullable=False, default='')
description_rich_hash: Mapped[bytes | None] = mapped_column(LargeBinary(HASH_SIZE), nullable=True, default=None)
description_rich: Mapped[CacheEntry | None] = relationship(
CacheEntry,
primaryjoin=CacheEntry.id == description_rich_hash,
viewonly=True,
default=None,
lazy='raise',
)
editor: Mapped[Editor | None] = mapped_column(Enum(Editor), nullable=True, default=None)
avatar_type: Mapped[AvatarType] = mapped_column(Enum(AvatarType), nullable=False, default=AvatarType.default)
avatar_id: Mapped[str | None] = mapped_column(Unicode(STORAGE_KEY_MAX_LENGTH), nullable=True, default=None)
home_point: Mapped[Point | None] = mapped_column(PointType, nullable=True, default=None)
home_zoom: Mapped[int | None] = mapped_column(SmallInteger, nullable=True, default=None)
# relationships (avoid circular imports)
if TYPE_CHECKING:
oauth1_applications: Mapped[list['OAuth1Application']] = relationship(
back_populates='user',
order_by='OAuth1Application.id.asc()',
lazy='raise',
)
oauth2_applications: Mapped[list['OAuth2Application']] = relationship(
back_populates='user',
order_by='OAuth2Application.id.asc()',
lazy='raise',
)
user_blocks_given: Mapped[list['UserBlock']] = relationship(
back_populates='from_user',
order_by='UserBlock.id.desc()',
lazy='raise',
)
user_blocks_received: Mapped[list['UserBlock']] = relationship(
back_populates='to_user',
order_by='UserBlock.id.desc()',
lazy='raise',
)
active_user_blocks_received: Mapped[list['UserBlock']] = relationship(
back_populates='to_user',
order_by='UserBlock.id.desc()',
| lazy='raise', |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: codefuse-ai/Collinear-Constrained-Attention
# Path: tokenizer/gpt2_tokenization.py
class GPT2Tokenizer(object):
"""
GPT-2 BPE tokenizer. Peculiarities:
- Byte-level BPE
"""
@classmethod
def from_pretrained(
cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs
):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
merges_file = PRETRAINED_MERGES_ARCHIVE_MAP[pretrained_model_name_or_path]
special_tokens_file = None
else:
vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)
merges_file = os.path.join(pretrained_model_name_or_path, MERGES_NAME)
special_tokens_file = os.path.join(
pretrained_model_name_or_path, SPECIAL_TOKENS_NAME
)
if not os.path.exists(special_tokens_file):
special_tokens_file = None
else:
logger.info(
"loading special tokens file {}".format(special_tokens_file)
)
# redirect to the cache, if necessary
try:
from .file_utils import cached_path
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
resolved_merges_file = cached_path(merges_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path,
", ".join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
vocab_file,
merges_file,
)
)
return None
if resolved_vocab_file == vocab_file and resolved_merges_file == merges_file:
logger.info("loading vocabulary file {}".format(vocab_file))
logger.info("loading merges file {}".format(merges_file))
else:
logger.info(
"loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file
)
)
logger.info(
"loading merges file {} from cache at {}".format(
merges_file, resolved_merges_file
)
)
if (
pretrained_model_name_or_path
in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP
):
# if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[
pretrained_model_name_or_path
]
kwargs["max_len"] = min(kwargs.get("max_len", int(1e12)), max_len)
# Instantiate tokenizer.
if special_tokens_file and "special_tokens" not in kwargs:
special_tokens = (
open(special_tokens_file, encoding="utf-8").read().split("\n")[:-1]
)
else:
special_tokens = kwargs.pop("special_tokens", [])
tokenizer = cls(
resolved_vocab_file,
resolved_merges_file,
special_tokens=special_tokens,
*inputs,
**kwargs
)
return tokenizer
def __init__(
self,
vocab_file,
merges_file,
errors="replace",
special_tokens=None,
max_len=None,
):
self.max_len = max_len if max_len is not None else int(1e12)
self.encoder = json.load(open(vocab_file))
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
bpe_data = open(merges_file, encoding="utf-8").read().split("\n")[1:-1]
bpe_merges = [tuple(merge.split()) for merge in bpe_data]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
# Should have added re.IGNORECASE so BPE merges can happen for
# capitalized versions of contractions
self.pat = re.compile(
r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
)
self.special_tokens = {}
self.special_tokens_decoder = {}
self.set_special_tokens(special_tokens)
def __len__(self):
return len(self.encoder) + len(self.special_tokens)
def set_special_tokens(self, special_tokens):
"""Add a list of additional tokens to the encoder.
The additional tokens are indexed starting from the last index of the
current vocabulary in the order of the `special_tokens` list.
"""
if not special_tokens:
self.special_tokens = {}
self.special_tokens_decoder = {}
return
self.special_tokens = dict(
(tok, len(self.encoder) + i) for i, tok in enumerate(special_tokens)
)
self.special_tokens_decoder = {v: k for k, v in self.special_tokens.items()}
logger.info("Special tokens {}".format(self.special_tokens))
@lru_cache(maxsize=131072)
def bpe(self, token):
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except BaseException:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
return word
def tokenize(self, text):
"""Tokenize a string."""
bpe_tokens = []
for token in re.findall(self.pat, text):
if sys.version_info[0] == 2:
token = "".join(self.byte_encoder[ord(b)] for b in token)
else:
token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
return bpe_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
if isinstance(tokens, str) or (
sys.version_info[0] == 2 and isinstance(tokens, unicode)
):
if tokens in self.special_tokens:
return self.special_tokens[tokens]
else:
return self.encoder.get(tokens, 0)
for token in tokens:
if token in self.special_tokens:
ids.append(self.special_tokens[token])
else:
ids.append(self.encoder.get(token, 0))
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this OpenAI GPT model ({} > {}). Running this"
" sequence through the model will result in indexing errors".format(
len(ids), self.max_len
)
)
return ids
def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
"""Converts a sequence of ids in BPE tokens using the vocab."""
tokens = []
for i in ids:
if i in self.special_tokens_decoder:
if not skip_special_tokens:
tokens.append(self.special_tokens_decoder[i])
else:
tokens.append(self.decoder[i])
return tokens
def encode(self, text):
return self.convert_tokens_to_ids(self.tokenize(text))
def decode(self, tokens):
text = "".join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode(
"utf-8", errors=self.errors
)
return text
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary and merge files to a directory."""
if not os.path.isdir(vocab_path):
logger.error(
"Vocabulary path ({}) should be a directory".format(vocab_path)
)
return
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
merge_file = os.path.join(vocab_path, MERGES_NAME)
special_tokens_file = os.path.join(vocab_path, SPECIAL_TOKENS_NAME)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(
self.bpe_ranks.items(), key=lambda kv: kv[1]
):
if index != token_index:
logger.warning(
"Saving vocabulary to {}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!".format(
merge_file
)
)
index = token_index
writer.write(" ".join(bpe_tokens) + "\n")
index += 1
index = len(self.encoder)
with open(special_tokens_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(
self.special_tokens.items(), key=lambda kv: kv[1]
):
if index != token_index:
logger.warning(
"Saving special tokens vocabulary to {}: BPE indices are not consecutive."
" Please check that the tokenizer is not corrupted!".format(
special_tokens_file
)
)
index = token_index
writer.write(token + "\n")
index += 1
return vocab_file, merge_file, special_tokens_file
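# Minimal re-implementation sketch, not shown in this excerpt, of the get_pairs helper that
# bpe() above relies on: the set of adjacent symbol pairs in a word tuple.
def get_pairs_sketch(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

print(get_pairs_sketch(tuple("hello")))  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}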
# Path: utils/common_utils.py
def print_rank_0(*message):
"""If distributed is initialized print only on rank 0."""
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print(*message, flush=True)
else:
print(*message, flush=True)
# Path: utils/common_utils.py
def is_old_version(path):
new_vocab_files = ['merge.model']
new_vocab_file_exists = []
for filename in new_vocab_files:
if not os.path.exists(os.path.join(path, filename)):
new_vocab_file_exists.append(False)
else:
new_vocab_file_exists.append(True)
if all(new_vocab_file_exists):
return False
if any(new_vocab_file_exists):
return 'new_version_file_absent'
else:
return True
# Path: tokenizer/tokenizer.py
from abc import ABC
from abc import abstractmethod
from tokenizers import Tokenizer
from transformers import GPT2Tokenizer, GPT2TokenizerFast
from typing import List, Union
from .gpt2_tokenization import GPT2Tokenizer
from utils.common_utils import print_rank_0, is_old_version
from model.glm.tokenization_glm import GLMTokenizer
from model.glm.tokenization_glm_deprecated import GLMChineseTokenizer
import numpy as np
import sentencepiece as spm
import tiktoken
# Copyright (c) 2021, EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron tokenizers."""
def build_tokenizer(args):
"""Initialize tokenizer."""
print_rank_0("> building {} tokenizer ...".format(args.tokenizer_type))
# if args.rank == 0:
# print("> building {} tokenizer ...".format(args.tokenizer_type), flush=True)
# Select and instantiate the tokenizer.
if args.tokenizer_type.lower() == "GPT2BPETokenizer".lower():
assert args.vocab_file is not None
assert args.merge_file is not None
tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file)
elif args.tokenizer_type.lower() == "SPMTokenizer".lower():
assert args.vocab_file is not None
tokenizer = SentencePieceTokenizer(args.vocab_file)
elif args.tokenizer_type.lower() == "HFTokenizer".lower():
assert args.vocab_file is not None
tokenizer = HFTokenizer(args.vocab_file)
elif args.tokenizer_type.lower() == "HFGPT2Tokenizer".lower():
if args.vocab_file is None:
print(
"WARNING: No vocab file found, loading Huggingface's pretrained GPT2Tokenizer"
)
tokenizer = HFGPT2Tokenizer(args.vocab_file)
elif args.tokenizer_type.lower() == "CharLevelTokenizer".lower():
tokenizer = CharLevelTokenizer(vocab_size=512)
elif args.tokenizer_type.lower() == "TiktokenTokenizer".lower():
assert args.vocab_file is not None
tokenizer = TiktokenTokenizer(args.vocab_file)
elif args.tokenizer_type.lower() == "GLMTokenizer".lower():
if is_old_version(args.pretrained_model_path):
print('is an old version')
args.glm_mask = '[sMASK]'
old_version_tokenizer = True
tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path, trust_remote_code=True)
else:
print('is not an old version')
old_version_tokenizer = False
tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path, trust_remote_code=True)
else:
raise NotImplementedError(
"{} tokenizer is not " "implemented.".format(args.tokenizer_type)
)
# Add vocab size.
args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size, args)
return tokenizer
def _vocab_size_with_padding(orig_vocab_size, args):
"""Pad vocab size so it is divisible by model parallel size and
still has a GPU friendly size."""
after = orig_vocab_size
| multiple = args.make_vocab_size_divisible_by * args.model_parallel_size |
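# Worked example with hypothetical numbers, not part of the prompt above: the padding loop rounds
# the vocabulary size up to the nearest multiple of make_vocab_size_divisible_by * model_parallel_size.
orig_vocab_size = 50257
multiple = 128 * 2  # make_vocab_size_divisible_by=128, model_parallel_size=2
after = orig_vocab_size
while after % multiple != 0:
    after += 1
print(after)  # 50432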
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Hritikbansal/videocon
# Path: training/pipeline_video/mplug_owl_video/configuration_mplug_owl.py
class MplugOwlConfig(PretrainedConfig):
r"""
[`MplugOwlConfig`] is the configuration class to store the configuration of a [`MplugOwlForConditionalGeneration`]. It is
used to instantiate a mPLUG-Owl model according to the specified arguments, defining the vision model, Q-Former model
and language model configs. Instantiating a configuration with the defaults will yield a similar configuration to
that of the mPLUG-Owl [x-plug/x_plug-llama-7b](https://huggingface.co/x-plug/x_plug-llama-7b) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`MplugOwlVisionConfig`].
visual_abstractor_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`MplugOwlVisualAbstractorConfig`].
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize any [`PretrainedConfig`].
num_query_tokens (`int`, *optional*, defaults to 32):
The number of query tokens passed through the Transformer.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import (
... MplugOwlVisionConfig,
... MplugOwlVisualAbstractorConfig,
... OPTConfig,
... MplugOwlConfig,
... MplugOwlForConditionalGeneration,
... )
>>> # Initializing a MplugOwlConfig with x-plug/x_plug-llama-7b style configuration
>>> configuration = MplugOwlConfig()
>>> # Initializing a MplugOwlForConditionalGeneration (with random weights) from the x-plug/x_plug-llama-7b style configuration
>>> model = MplugOwlForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a MplugOwlConfig from a MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig and any PretrainedConfig
>>> # Initializing mPLUG-Owl vision, mPLUG-Owl Q-Former and language model configurations
>>> vision_config = MplugOwlVisionConfig()
>>> visual_abstractor_config = MplugOwlVisualAbstractorConfig()
>>> text_config = OPTConfig()
>>> config = MplugOwlConfig.from_text_vision_configs(vision_config, visual_abstractor_config, text_config)
```"""
model_type = "mplug-owl"
is_composition = True
def __init__(
self, vision_config=None, visual_abstractor_config=None, text_config=None, num_query_tokens=64, **kwargs
):
super().__init__(**kwargs)
if vision_config is None:
vision_config = MplugOwlVisionConfig().to_dict()
logger.info("vision_config is None.")
if visual_abstractor_config is None:
visual_abstractor_config = {}
logger.info("abstractor_config is None. ")
if text_config is None:
# we use LLAMA 7b by default
from ..llama.configuration_llama import LlamaConfig
text_config = LlamaConfig(pad_token_id=2).to_dict()
logger.info("text_config is None.")
self.vision_config = MplugOwlVisionConfig(**vision_config)
self.visual_abstractor_config = MplugOwlVisualAbstractorConfig(**visual_abstractor_config)
# self.visual_abstractor_config.layer_norm_eps = 1e-6
text_model_type = text_config["model_type"] if "model_type" in text_config else "llama"
self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
self.tie_word_embeddings = self.text_config.tie_word_embeddings
self.is_encoder_decoder = self.text_config.is_encoder_decoder
self.num_query_tokens = num_query_tokens
# self.visual_abstractor_config.encoder_hidden_size = self.vision_config.hidden_size
self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
self.initializer_factor = 1.0
self.initializer_range = 0.02
for attr in dir(self.text_config):
if not hasattr(self, attr):
setattr(self, attr, getattr(self.text_config, attr))
@classmethod
def from_vision_visual_abstractor_text_configs(
cls,
vision_config: MplugOwlVisionConfig,
visual_abstractor_config: MplugOwlVisualAbstractorConfig,
text_config: PretrainedConfig,
**kwargs,
):
r"""
Instantiate a [`MplugOwlConfig`] (or a derived class) from a mPLUG-Owl vision model, Q-Former and language model
configurations.
Returns:
[`MplugOwlConfig`]: An instance of a configuration object
"""
return cls(
vision_config=vision_config.to_dict(),
visual_abstractor_config=visual_abstractor_config.to_dict(),
text_config=text_config.to_dict(),
**kwargs,
)
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
Returns:
`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
"""
output = copy.deepcopy(self.__dict__)
output["vision_config"] = self.vision_config.to_dict()
output["visual_abstractor_config"] = self.visual_abstractor_config.to_dict()
output["text_config"] = self.text_config.to_dict()
output["model_type"] = self.__class__.model_type
return output
# Path: training/pipeline_video/mplug_owl_video/configuration_mplug_owl.py
class MplugOwlVisionConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MplugOwlVisionModel`]. It is used to instantiate a
mPLUG-Owl vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration defaults will yield a similar configuration to that of the mPLUG-Owl
[x-plug/x_plug-llama-7b](https://huggingface.co/x-plug/x_plug-llama-7b) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 32):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
```"""
model_type = "mplug_owl_vision_model"
def __init__(
self,
hidden_size=1024,
intermediate_size=4096,
projection_dim=768,
num_hidden_layers=24,
num_attention_heads=16,
num_channels=3,
image_size=224,
patch_size=14,
hidden_act="quick_gelu",
layer_norm_eps=1e-6,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
use_flash_attn=False,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.use_flash_attn = use_flash_attn
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
# get the vision config dict if we are loading from MplugOwlConfig
if config_dict.get("model_type") == "mplug-owl":
config_dict = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
)
return cls.from_dict(config_dict, **kwargs)
# Path: training/pipeline_video/mplug_owl_video/configuration_mplug_owl.py
class MplugOwlVisualAbstractorConfig(PretrainedConfig):
model_type = "mplug_owl_visual_abstract"
def __init__(
self,
hidden_size=1024, #
num_hidden_layers=6, #
num_attention_heads=16, #
intermediate_size=4096, #
attention_probs_dropout_prob=0.1, #
initializer_range=0.02,
layer_norm_eps=1e-6, #
encoder_hidden_size=1024, #
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.encoder_hidden_size = encoder_hidden_size
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
# get the visual_abstractor config dict if we are loading from MplugOwlConfig
if config_dict.get("model_type") == "mplug-owl":
config_dict = config_dict["abstractor_config"]
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
)
return cls.from_dict(config_dict, **kwargs)
# Path: training/pipeline_video/mplug_owl_video/modeling_mplug_owl.py
import logging
import math
import math
import torch
import torch.utils.checkpoint
import einops
from typing import Any, Optional, Tuple, Union
from flash_attn.flash_attn_interface import flash_attn_unpadded_func
from dataclasses import dataclass
from typing import Any, Optional, Tuple, Union
from torch import nn
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
BaseModelOutputWithPastAndCrossAttentions
)
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from transformers.models.auto import AutoModelForCausalLM
from .configuration_mplug_owl import MplugOwlConfig, MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig
from transformers import GenerationConfig
output_attentions=output_attentions,
)
hidden_states = hidden_states + residual
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = hidden_states + residual
hidden_states = einops.rearrange(hidden_states, '(b t) n d -> b t n d', b=B)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class MplugOwlPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = MplugOwlConfig
base_model_prefix = "mplug_owl"
supports_gradient_checkpointing = True
_keys_to_ignore_on_load_missing = [
r"position_ids",
r"language_model.encoder.embed_tokens.weight",
r"language_model.decoder.embed_tokens.weight",
r"language_model.lm_head.weight",
]
_no_split_modules = [
"MplugOwlVisionEncoderLayer",
"LlamaDecoderLayer",
"MplugOwlVisualAbstractorLayer",
"LlamaForCausalLM",
"Parameter",
]
_keep_in_fp32_modules = ["wo"]
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_range
if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=factor)
if hasattr(module, "bias") and module.bias is not None:
module.bias.data.zero_()
if isinstance(module, MplugOwlVisionEmbeddings):
if hasattr(self.config, "vision_config"):
factor = self.config.vision_config.initializer_range
nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor)
nn.init.trunc_normal_(module.cls_token, mean=0.0, std=factor)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Parameter):
raise ValueError
nn.init.trunc_normal_(module.data, mean=0.0, std=factor)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, MplugOwlVisionEncoder):
module.gradient_checkpointing = value
MPLUG_OWL_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`MplugOwlConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
MPLUG_OWL_VISION_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`MplugOwlProcessor`]. See [`MplugOwlProcessor.__call__`] for
details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
MPLUG_OWL_TEXT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
| is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: xyongLu/SBCFormer
# Path: mixup.py
class Mixup:
""" Mixup/Cutmix that applies different params to each element or whole batch
Args:
mixup_alpha (float): mixup alpha value, mixup is active if > 0.
cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0.
cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None.
prob (float): probability of applying mixup or cutmix per batch or element
switch_prob (float): probability of switching to cutmix instead of mixup when both are active
mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element)
correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders
label_smoothing (float): apply label smoothing to the mixed target tensor
num_classes (int): number of classes for target
"""
def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5,
mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000):
self.mixup_alpha = mixup_alpha
self.cutmix_alpha = cutmix_alpha
self.cutmix_minmax = cutmix_minmax
if self.cutmix_minmax is not None:
assert len(self.cutmix_minmax) == 2
# force cutmix alpha == 1.0 when minmax active to keep logic simple & safe
self.cutmix_alpha = 1.0
self.mix_prob = prob
self.switch_prob = switch_prob
self.label_smoothing = label_smoothing
self.num_classes = num_classes
self.mode = mode
self.correct_lam = correct_lam # correct lambda based on clipped area for cutmix
self.mixup_enabled = True # set to false to disable mixing (intended to be set by train loop)
def _params_per_elem(self, batch_size):
lam = np.ones(batch_size, dtype=np.float32)
use_cutmix = np.zeros(batch_size, dtype=np.bool)
if self.mixup_enabled:
if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
use_cutmix = np.random.rand(batch_size) < self.switch_prob
lam_mix = np.where(
use_cutmix,
np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size),
np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size))
elif self.mixup_alpha > 0.:
lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)
elif self.cutmix_alpha > 0.:
use_cutmix = np.ones(batch_size, dtype=np.bool)
lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size)
else:
assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam)
return lam, use_cutmix
def _params_per_batch(self):
lam = 1.
use_cutmix = False
if self.mixup_enabled and np.random.rand() < self.mix_prob:
if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
use_cutmix = np.random.rand() < self.switch_prob
lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \
np.random.beta(self.mixup_alpha, self.mixup_alpha)
elif self.mixup_alpha > 0.:
lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha)
elif self.cutmix_alpha > 0.:
use_cutmix = True
lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha)
else:
assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
lam = float(lam_mix)
return lam, use_cutmix
def _mix_elem(self, x):
batch_size = len(x)
lam_batch, use_cutmix = self._params_per_elem(batch_size)
x_orig = x.clone() # need to keep an unmodified original for mixing source
for i in range(batch_size):
j = batch_size - i - 1
lam = lam_batch[i]
if lam != 1.:
if use_cutmix[i]:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
lam_batch[i] = lam
else:
x[i] = x[i] * lam + x_orig[j] * (1 - lam)
return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)
def _mix_pair(self, x):
batch_size = len(x)
lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)
x_orig = x.clone() # need to keep an unmodified original for mixing source
for i in range(batch_size // 2):
j = batch_size - i - 1
lam = lam_batch[i]
if lam != 1.:
if use_cutmix[i]:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh]
lam_batch[i] = lam
else:
x[i] = x[i] * lam + x_orig[j] * (1 - lam)
x[j] = x[j] * lam + x_orig[i] * (1 - lam)
lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))
return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)
def _mix_batch(self, x):
lam, use_cutmix = self._params_per_batch()
if lam == 1.:
return 1.
if use_cutmix:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]
else:
x_flipped = x.flip(0).mul_(1. - lam)
x.mul_(lam).add_(x_flipped)
return lam
def __call__(self, x, target):
assert len(x) % 2 == 0, 'Batch size should be even when using this'
if self.mode == 'elem':
lam = self._mix_elem(x)
elif self.mode == 'pair':
lam = self._mix_pair(x)
else:
lam = self._mix_batch(x)
target = mixup_target(target, self.num_classes, lam, self.label_smoothing, x.device)
return x, target
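# Usage sketch, not part of the repository (relies on the Mixup class above and its
# mixup_target/cutmix helpers from the same module): integer labels become soft labels.
import torch

mixup_fn = Mixup(mixup_alpha=0.8, cutmix_alpha=1.0, label_smoothing=0.1, num_classes=10)
images = torch.randn(8, 3, 32, 32)            # batch size must be even for this class
labels = torch.randint(0, 10, (8,))
mixed_images, soft_labels = mixup_fn(images, labels)
print(mixed_images.shape, soft_labels.shape)  # torch.Size([8, 3, 32, 32]) torch.Size([8, 10])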
# Path: datasets.py
def build_dataset(is_train, args):
if args.data_set == 'CIFAR10':
if is_train:
transform = transforms.Compose([
transforms.Resize(args.input_size),
transforms.RandomCrop(args.input_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CIFAR10_DEFAULT_MEAN, CIFAR10_DEFAULT_STD)
])
else:
transform = transforms.Compose([
transforms.Resize(args.input_size),
transforms.ToTensor(),
transforms.Normalize(CIFAR10_DEFAULT_MEAN, CIFAR10_DEFAULT_STD)
])
dataset = datasets.CIFAR10(args.data_path, train=is_train, download=True, transform=transform)
nb_classes = 10
elif args.data_set == 'CIFAR100':
if is_train:
transform = transforms.Compose([
transforms.Resize(args.input_size),
transforms.RandomCrop(args.input_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CIFAR100_DEFAULT_MEAN, CIFAR100_DEFAULT_STD)
])
else:
transform = transforms.Compose([
transforms.Resize(args.input_size),
transforms.ToTensor(),
transforms.Normalize(CIFAR100_DEFAULT_MEAN, CIFAR100_DEFAULT_STD)
])
dataset = datasets.CIFAR100(args.data_path, train=is_train, download=True, transform=transform)
nb_classes = 100
elif args.data_set == 'IMNET':
transform = build_transform(is_train, args)
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = 1000
elif args.data_set == 'INAT':
transform = build_transform(is_train, args)
dataset = INatDataset(args.data_path, train=is_train, year=2018,
category=args.inat_category, transform=transform)
nb_classes = dataset.nb_classes
elif args.data_set == 'INAT19':
transform = build_transform(is_train, args)
dataset = INatDataset(args.data_path, train=is_train, year=2019,
category=args.inat_category, transform=transform)
nb_classes = dataset.nb_classes
return dataset, nb_classes
# Path: engine.py
def train_one_epoch(model: torch.nn.Module, criterion: DistillationLoss,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None,
set_training_mode=True):
model.train(set_training_mode)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
with torch.cuda.amp.autocast():
outputs = model(samples)
loss = criterion(samples, outputs, targets)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
optimizer.zero_grad()
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order)
torch.cuda.synchronize()
if model_ema is not None:
model_ema.update(model)
metric_logger.update(loss=loss_value)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
# Path: engine.py
@torch.no_grad()
def evaluate(data_loader, model, device):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
print_freq = 10
# switch to evaluation mode
model.eval()
for images, target in metric_logger.log_every(data_loader, print_freq, header):
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
# Path: losses.py
class DistillationLoss(torch.nn.Module):
"""
This module wraps a standard criterion and adds an extra knowledge distillation loss by
taking a teacher model prediction and using it as additional supervision.
"""
def __init__(self, base_criterion: torch.nn.Module, teacher_model: torch.nn.Module,
distillation_type: str, alpha: float, tau: float):
super().__init__()
self.base_criterion = base_criterion
self.teacher_model = teacher_model
assert distillation_type in ['none', 'soft', 'hard']
self.distillation_type = distillation_type
self.alpha = alpha
self.tau = tau
def forward(self, inputs, outputs, labels):
"""
Args:
inputs: The original inputs that are feed to the teacher model
outputs: the outputs of the model to be trained. It is expected to be
either a Tensor, or a Tuple[Tensor, Tensor], with the original output
in the first position and the distillation predictions as the second output
labels: the labels for the base criterion
"""
outputs_kd = None
if not isinstance(outputs, torch.Tensor):
# assume that the model outputs a tuple of [outputs, outputs_kd]
outputs, outputs_kd = outputs
base_loss = self.base_criterion(outputs, labels)
if self.distillation_type == 'none':
return base_loss
if outputs_kd is None:
raise ValueError("When knowledge distillation is enabled, the model is "
"expected to return a Tuple[Tensor, Tensor] with the output of the "
"class_token and the dist_token")
# don't backprop through the teacher
with torch.no_grad():
teacher_outputs = self.teacher_model(inputs)
if self.distillation_type == 'soft':
T = self.tau
# taken from https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100
# with slight modifications
distillation_loss = F.kl_div(
F.log_softmax(outputs_kd / T, dim=1),
#We provide the teacher's targets in log probability because we use log_target=True
#(as recommended in pytorch https://github.com/pytorch/pytorch/blob/9324181d0ac7b4f7949a574dbc3e8be30abe7041/torch/nn/functional.py#L2719)
#but it is possible to give just the probabilities and set log_target=False. In our experiments we tried both.
F.log_softmax(teacher_outputs / T, dim=1),
reduction='sum',
log_target=True
) * (T * T) / outputs_kd.numel()
#We divide by outputs_kd.numel() to have the legacy PyTorch behavior.
#But we also experimented with output_kd.size(0)
#see issue 61(https://github.com/facebookresearch/deit/issues/61) for more details
elif self.distillation_type == 'hard':
distillation_loss = F.cross_entropy(outputs_kd, teacher_outputs.argmax(dim=1))
loss = base_loss * (1 - self.alpha) + distillation_loss * self.alpha
return loss
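# Added illustrative sketch (not part of the original file): wiring DistillationLoss around a
# base criterion. The toy teacher, batch and hyper-parameters are assumptions; with
# distillation_type='none' only the base loss is returned, as in the forward pass above.
import torch
import torch.nn as nn
_teacher = nn.Linear(8, 10).eval()
_criterion = DistillationLoss(nn.CrossEntropyLoss(), _teacher, 'none', alpha=0.5, tau=1.0)
_loss = _criterion(torch.randn(4, 8), torch.randn(4, 10), torch.randint(0, 10, (4,)))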
# Path: samplers.py
class RASampler(torch.utils.data.Sampler):
"""Sampler that restricts data loading to a subset of the dataset for distributed,
with repeated augmentation.
It ensures that each augmented version of a sample will be visible to a
different process (GPU)
Heavily based on torch.utils.data.DistributedSampler
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 3.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
# self.num_selected_samples = int(math.ceil(len(self.dataset) / self.num_replicas))
self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))
self.shuffle = shuffle
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices = [ele for ele in indices for i in range(3)]
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices[:self.num_selected_samples])
def __len__(self):
return self.num_selected_samples
def set_epoch(self, epoch):
self.epoch = epoch
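# Added worked micro-example (not part of the original file) of the repeated-augmentation
# trick used in __iter__ above: every index is repeated 3 times before the rank-strided
# subsampling, so each replica sees a different augmented copy of the same sample.
_indices = [2, 0, 1]
_repeated = [ele for ele in _indices for _ in range(3)]  # [2, 2, 2, 0, 0, 0, 1, 1, 1]
_rank0_view = _repeated[0::2]  # with 2 replicas, rank 0 keeps [2, 2, 0, 1, 1]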
# Path: main.py
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import utils
from pathlib import Path
from mixup import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler, get_state_dict, ModelEma
from datasets import build_dataset
from engine import train_one_epoch, evaluate
from losses import DistillationLoss
from samplers import RASampler
from models import *
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--clip-grad', type=float, default=5, metavar='NORM', help='Clip gradient norm (default: 5)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.05, help='weight decay (default: 0.05)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER', help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=2.5e-4, metavar='LR', help='learning rate (default: 2.5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR', help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N', help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N', help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N', help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=False)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Distillation parameters
parser.add_argument('--distilled', action='store_true', default=False, help='Perform distillation')
parser.add_argument('--teacher-model', default='regnety_200mf', type=str, metavar='MODEL',
help='Name of teacher model to train (default: "regnety_200mf")')
parser.add_argument('--teacher-path', type=str, default='')
parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
# Finetuning params
parser.add_argument('--finetune', default='', help='finetune from checkpoint')
# Dataset parameters
parser.add_argument('--data-path', default='../../PythonWork_E/Data/ImageNet_2012', type=str,
help='dataset path')
parser.add_argument('--data-set', default='IMNET', choices=['CIFAR10', 'CIFAR100' , 'IMNET'],
type=str, help='Image Net dataset path')
parser.add_argument('--inat-category', default='name',
choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
type=str, help='semantic granularity')
parser.add_argument('--output_dir', default='./outputs', help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda', help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default= '', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true', default=False, help='Perform evaluation only')
parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin-mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
# throughput test
parser.add_argument('--throughout', action='store_true', help='Perform throughput test only')
return parser
@torch.no_grad()
def throughput(data_loader, model, logger):
model.eval()
for _, (images, _) in enumerate(data_loader):
images = images.cuda(non_blocking=True)
batch_size = images.shape[0]
for i in range(50):
model(images)
torch.cuda.synchronize()
logger.info(f"throughput averaged with 30 times")
tic1 = time.time()
for i in range(30):
model(images)
torch.cuda.synchronize()
tic2 = time.time()
logger.info(f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}")
return
def main(args):
| utils.init_distributed_mode(args) |
====REPOSITORY====
# Repo Name: VILA-Lab/GBLM-Pruner
# Path: lib/prune.py
def prune_wanda(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0, layer_no=-1):
use_cache = model.config.use_cache
model.config.use_cache = False
print("loading calibdation data")
dataloader, _ = get_loaders("c4",nsamples=args.nsamples,seed=args.seed,seqlen=2048,tokenizer=tokenizer)
print("dataset loading complete")
with torch.no_grad():
inps, outs, attention_mask, position_ids = prepare_calibration_input(model, dataloader, args.nsamples, device)
layers = model.model.layers
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
if f"model.layers.{i}" in model.hf_device_map: ## handle the case for llama-30B and llama-65B, when the device map has multiple GPUs;
dev = model.hf_device_map[f"model.layers.{i}"]
inps, outs, attention_mask, position_ids = inps.to(dev), outs.to(dev), attention_mask.to(dev), position_ids.to(dev)
wrapped_layers = {}
for name in subset:
wrapped_layers[name] = WrappedGPT(subset[name], layer_id=i, layer_name=name)
def add_batch(name):
def tmp(_, inp, out):
wrapped_layers[name].add_batch(inp[0].data, out.data)
return tmp
handles = []
for name in wrapped_layers:
handles.append(subset[name].register_forward_hook(add_batch(name))) ## this is an important function.
for j in range(args.nsamples):
with torch.no_grad():
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
for h in handles:
h.remove()
for sub_i, name in enumerate(subset):
print(f"pruning layer {i} name {name}")
W_metric = torch.abs(subset[name].weight.data) * torch.sqrt(wrapped_layers[name].scaler_row.reshape((1,-1)))
W_mask = (torch.zeros_like(W_metric) == 1) ## initialize a mask to be all False
if prune_n != 0:
# structured n:m sparsity
for ii in range(W_metric.shape[1]):
if ii % prune_m == 0:
tmp = W_metric[:,ii:(ii+prune_m)].float()
W_mask.scatter_(1,ii+torch.topk(tmp, prune_n,dim=1, largest=False)[1], True)
else:
sort_res = torch.sort(W_metric, dim=-1, stable=True)
if args.use_variant:
# wanda variant
tmp_metric = torch.cumsum(sort_res[0], dim=1)
sum_before = W_metric.sum(dim=1)
alpha = 0.4
alpha_hist = [0., 0.8]
W_mask, cur_sparsity = return_given_alpha(alpha, sort_res, W_metric, tmp_metric, sum_before)
while (torch.abs(cur_sparsity - args.sparsity_ratio)>0.001) and (alpha_hist[1]-alpha_hist[0]>=0.001):
if cur_sparsity > args.sparsity_ratio:
alpha_new = (alpha + alpha_hist[0]) / 2.0
alpha_hist[1] = alpha
else:
alpha_new = (alpha + alpha_hist[1]) / 2.0
alpha_hist[0] = alpha
alpha = alpha_new
W_mask, cur_sparsity = return_given_alpha(alpha, sort_res, W_metric, tmp_metric, sum_before)
print(f"alpha found {alpha} sparsity {cur_sparsity:.6f}")
else:
# unstructured pruning
indices = sort_res[1][:,:int(W_metric.shape[1]*args.sparsity_ratio)]
W_mask.scatter_(1, indices, True)
subset[name].weight.data[W_mask] = 0 ## set weights to zero
for j in range(args.nsamples):
with torch.no_grad():
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
inps, outs = outs, inps
model.config.use_cache = use_cache
torch.cuda.empty_cache()
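# Added worked micro-example (not part of the original file) of the Wanda-style metric used
# above: |W| scaled by the square root of the per-input activation statistic, then the
# lowest-scoring 50% of each row masked out (unstructured pruning branch).
import torch
_W = torch.tensor([[0.5, -2.0, 0.1, 1.0]])
_scaler_row = torch.tensor([4.0, 0.25, 1.0, 1.0])  # stand-in for wrapped_layers[name].scaler_row
_metric = torch.abs(_W) * torch.sqrt(_scaler_row.reshape((1, -1)))  # [[1.0, 1.0, 0.1, 1.0]]
_sort_res = torch.sort(_metric, dim=-1, stable=True)
_mask = torch.zeros_like(_metric) == 1
_mask.scatter_(1, _sort_res[1][:, :int(_metric.shape[1] * 0.5)], True)
_W[_mask] = 0  # -> [[0.0, -2.0, 0.0, 1.0]]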
# Path: lib/prune.py
def prune_magnitude(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0, layer_no=-1):
layers = model.model.layers
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
for name in subset:
W = subset[name].weight.data
W_metric = torch.abs(W)
if prune_n != 0:
W_mask = (torch.zeros_like(W)==1)
for ii in range(W_metric.shape[1]):
if ii % prune_m == 0:
tmp = W_metric[:,ii:(ii+prune_m)].float()
W_mask.scatter_(1,ii+torch.topk(tmp, prune_n,dim=1, largest=False)[1], True)
else:
# thresh = torch.sort(W_metric.flatten().cuda())[0][int(W.numel()*args.sparsity_ratio)].cpu()
thresh = torch.sort(W_metric.flatten())[0][int(W_metric.numel()*args.sparsity_ratio)].cpu()
W_mask = (W_metric<=thresh)
W[W_mask] = 0
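# Added worked micro-example (not part of the original file) of the structured n:m branch
# above with prune_n=2, prune_m=4: in each group of 4 consecutive weights, the 2 smallest
# magnitudes are zeroed.
import torch
_W = torch.tensor([[0.9, -0.1, 0.3, -0.8]])
_W_metric = torch.abs(_W)
_W_mask = torch.zeros_like(_W) == 1
_W_mask.scatter_(1, torch.topk(_W_metric[:, 0:4].float(), 2, dim=1, largest=False)[1], True)
_W[_W_mask] = 0  # -> [[0.9, 0.0, 0.0, -0.8]]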
# Path: lib/prune.py
@torch.no_grad()
def prune_sparsegpt(args, model, tokenizer, dev, prune_n=0, prune_m=0, layer_no=-1):
## SparseGPT code available at: https://github.com/IST-DASLab/sparsegpt/tree/f5c25005a61f96a0933ca2f95705a963585aafaa
print('Starting ...')
dataloader, _ = get_loaders("c4",nsamples=args.nsamples,seed=args.seed,seqlen=2048,tokenizer=tokenizer)
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.model.layers
if "model.embed_tokens" in model.hf_device_map:
dev = model.hf_device_map["model.embed_tokens"]
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros(
(args.nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev
)
cache = {'i': 0, 'attention_mask': None, "position_ids": None}
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
cache['attention_mask'] = kwargs['attention_mask']
cache['position_ids'] = kwargs['position_ids']
raise ValueError
layers[0] = Catcher(layers[0])
for batch in dataloader:
try:
model(batch[0].to(dev))
except ValueError:
pass
layers[0] = layers[0].module
torch.cuda.empty_cache()
outs = torch.zeros_like(inps)
attention_mask = cache['attention_mask']
position_ids = cache['position_ids']
print('Ready.')
for i in range(len(layers)):
layer = layers[i]
if f"model.layers.{i}" in model.hf_device_map:
dev = model.hf_device_map[f"model.layers.{i}"]
print(f"layer {i} device {dev}")
inps, outs, attention_mask, position_ids = inps.to(dev), outs.to(dev), attention_mask.to(dev), position_ids.to(dev)
subset = find_layers(layer)
gpts = {}
for name in subset:
gpts[name] = SparseGPT(subset[name])
def add_batch(name):
def tmp(_, inp, out):
gpts[name].add_batch(inp[0].data, out.data)
return tmp
handles = []
for name in gpts:
handles.append(subset[name].register_forward_hook(add_batch(name)))
for j in range(args.nsamples):
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
for h in handles:
h.remove()
for name in gpts:
print(i, name)
print('Pruning ...')
gpts[name].fasterprune(args.sparsity_ratio, prune_n=prune_n, prune_m=prune_m, percdamp=0.01, blocksize=128)
gpts[name].free()
for j in range(args.nsamples):
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
layers[i] = layer
torch.cuda.empty_cache()
inps, outs = outs, inps
model.config.use_cache = use_cache
torch.cuda.empty_cache()
# Path: lib/prune.py
def check_sparsity(model, args):
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.model.layers
count = 0
total_params = 0
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
sub_count = 0
sub_params = 0
for name in subset:
W = subset[name].weight.data
count += (W==0).sum().item()
total_params += W.numel()
sub_count += (W==0).sum().item()
sub_params += W.numel()
print(f"layer {i} sparsity {float(sub_count)/sub_params:.6f}")
model.config.use_cache = use_cache
return float(count)/total_params
# Path: lib/prune.py
def find_layers(module, layers=[nn.Linear], name=''):
"""
Recursively find the layers of a certain type in a module.
Args:
module (nn.Module): PyTorch module.
layers (list): List of layer types to find.
name (str): Name of the module.
Returns:
dict: Dictionary of layers of the given type(s) within the module.
"""
if type(module) in layers:
return {name: module}
res = {}
for name1, child in module.named_children():
res.update(find_layers(
child, layers=layers, name=name + '.' + name1 if name != '' else name1
))
return res
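# Added illustrative usage (not part of the original file): find_layers on a small module
# returns the nn.Linear children keyed by their dotted attribute path.
import torch.nn as nn
_block = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
_found = find_layers(_block)  # keys: '0' and '2' (the two nn.Linear layers)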
# Path: lib/prune.py
def prune_gradient(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0, layer_no=-1):
layers = model.model.layers
with open(args.gradient_path, 'rb') as file:
gradients = torch.load(args.gradient_path, map_location=torch.device('cpu'))
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
for name in subset:
indexed_name = f"{name}_layer_{i}"
W = subset[name].weight.data
W_metric = torch.abs(W)
if not args.gradient_inv:
W_metric = W_metric.to(dtype=torch.float32) * torch.abs(gradients[indexed_name].to(device=W_metric.device)).to(dtype=torch.float32)#+ small_value)
else:
small_value = torch.tensor(1e-8, dtype=gradients[indexed_name].dtype, device=gradients[indexed_name].device)
gradient_inv = 1 / (torch.abs(gradients[indexed_name]) + small_value)
W_metric = W_metric.to(dtype=torch.float32) * gradient_inv.to(device=W_metric.device).to(dtype=torch.float32)
W_mask = (torch.zeros_like(W)==1)
if prune_n != 0:
for ii in range(W_metric.shape[1]):
if ii % prune_m == 0:
tmp = W_metric[:,ii:(ii+prune_m)].float()
W_mask.scatter_(1,ii+torch.topk(tmp, prune_n,dim=1, largest=False)[1], True)
else:
sort_res = torch.sort(W_metric, dim=-1, stable=True)
indices = sort_res[1][:,:int(W_metric.shape[1]*args.sparsity_ratio)]
W_mask.scatter_(1, indices, True)
W[W_mask] = 0
# Path: lib/prune.py
def prune_gblm(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0, layer_no=-1):
use_cache = model.config.use_cache
model.config.use_cache = False
with open(args.gradient_path, 'rb') as file:
gradients = torch.load(args.gradient_path, map_location=torch.device('cpu'))
print("loading calibdation data")
dataloader, _ = get_loaders("c4",nsamples=args.nsamples,seed=args.seed,seqlen=2048,tokenizer=tokenizer)
print("dataset loading complete")
with torch.no_grad():
inps, outs, attention_mask, position_ids = prepare_calibration_input(model, dataloader, args.nsamples, device)
layers = model.model.layers
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
if f"model.layers.{i}" in model.hf_device_map: ## handle the case for llama-30B and llama-65B, when the device map has multiple GPUs;
dev = model.hf_device_map[f"model.layers.{i}"]
inps, outs, attention_mask, position_ids = inps.to(dev), outs.to(dev), attention_mask.to(dev), position_ids.to(dev)
wrapped_layers = {}
for name in subset:
wrapped_layers[name] = WrappedGPT(subset[name], layer_id=i, layer_name=name)
def add_batch(name):
def tmp(_, inp, out):
wrapped_layers[name].add_batch(inp[0].data, out.data)
return tmp
handles = []
for name in wrapped_layers:
handles.append(subset[name].register_forward_hook(add_batch(name))) ## this is an important function.
for j in range(args.nsamples):
with torch.no_grad():
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
for h in handles:
h.remove()
for sub_i, name in enumerate(subset):
indexed_name = f"{name}_layer_{i}"
print(f"pruning layer {i} name {name}")
W_metric = torch.abs(subset[name].weight.data) * torch.sqrt(wrapped_layers[name].scaler_row.reshape((1,-1)))
if not args.gradient_inv:
# small_value = torch.tensor(1e-8, dtype=gradients[indexed_name].dtype, device=gradients[indexed_name].device)
W_metric_grad = torch.abs(subset[name].weight.data)* torch.abs(gradients[indexed_name].to(device=W_metric.device))
W_metric = W_metric.to(dtype=torch.float32) + W_metric_grad.to(dtype=torch.float32) #+ small_value)
else:
small_value = torch.tensor(1e-8, dtype=gradients[indexed_name].dtype, device=gradients[indexed_name].device)
gradient_inv = 1 / (torch.abs(gradients[indexed_name]) + small_value)
W_metric = W_metric.to(dtype=torch.float32) * gradient_inv.to(device=W_metric.device).to(dtype=torch.float32)
W_mask = (torch.zeros_like(W_metric) == 1) ## initialize a mask to be all False
if prune_n != 0:
# structured n:m sparsity
for ii in range(W_metric.shape[1]):
if ii % prune_m == 0:
tmp = W_metric[:,ii:(ii+prune_m)].float()
W_mask.scatter_(1,ii+torch.topk(tmp, prune_n,dim=1, largest=False)[1], True)
else:
sort_res = torch.sort(W_metric, dim=-1, stable=True)
if args.use_variant:
# wanda variant
tmp_metric = torch.cumsum(sort_res[0], dim=1)
sum_before = W_metric.sum(dim=1)
alpha = 0.4
alpha_hist = [0., 0.8]
W_mask, cur_sparsity = return_given_alpha(alpha, sort_res, W_metric, tmp_metric, sum_before)
while (torch.abs(cur_sparsity - args.sparsity_ratio)>0.001) and (alpha_hist[1]-alpha_hist[0]>=0.001):
if cur_sparsity > args.sparsity_ratio:
alpha_new = (alpha + alpha_hist[0]) / 2.0
alpha_hist[1] = alpha
else:
alpha_new = (alpha + alpha_hist[1]) / 2.0
alpha_hist[0] = alpha
alpha = alpha_new
W_mask, cur_sparsity = return_given_alpha(alpha, sort_res, W_metric, tmp_metric, sum_before)
print(f"alpha found {alpha} sparsity {cur_sparsity:.6f}")
else:
# unstructured pruning
indices = sort_res[1][:,:int(W_metric.shape[1]*args.sparsity_ratio)]
W_mask.scatter_(1, indices, True)
subset[name].weight.data[W_mask] = 0 ## set weights to zero
for j in range(args.nsamples):
with torch.no_grad():
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
inps, outs = outs, inps
model.config.use_cache = use_cache
torch.cuda.empty_cache()
# Path: lib/eval.py
def eval_ppl(model, tokenizer, device=torch.device("cuda:0")):
# Set dataset
dataset = "wikitext2"
# Print status
print(f"evaluating on {dataset}")
# Get the test loader
_, testloader = get_loaders(
dataset, seed=0, seqlen=model.seqlen, tokenizer=tokenizer
)
# Evaluate ppl in no grad context to avoid updating the model
with torch.no_grad():
ppl = eval_ppl_wikitext(model, testloader, 1, device)
return ppl
# Path: main.py
import argparse
import os
import numpy as np
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer
from importlib.metadata import version
from lib.prune import prune_wanda, prune_magnitude, prune_sparsegpt, check_sparsity, find_layers, prune_gradient, prune_gblm
from lib.eval import eval_ppl
print('torch', version('torch'))
print('transformers', version('transformers'))
print('accelerate', version('accelerate'))
print('# of gpus: ', torch.cuda.device_count())
def get_llm(model, cache_dir="llm_weights"):
model = AutoModelForCausalLM.from_pretrained(
model,
torch_dtype=torch.float16,
cache_dir=cache_dir,
low_cpu_mem_usage=True,
device_map="auto"
)
print("printing gpu allocation for all the layers")
print(model.hf_device_map)
model.seqlen = 2048
return model
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, help='LLaMA model')
parser.add_argument('--gradient_path', default=None,type=str, help='gradient path')
parser.add_argument('--grad_norm', type=str, default="none", choices=["none", "accumulation_norm", "2-norm-sample-dim"])
parser.add_argument('--seed', type=int, default=0, help='Seed for sampling the calibration data.')
parser.add_argument('--nsamples', type=int, default=128, help='Number of calibration samples.')
parser.add_argument('--seq_length', type=int, default=2048, help='Sequence length of the input.')
parser.add_argument('--sparsity_ratio', type=float, default=0, help='Sparsity level')
parser.add_argument('--layer_no', type=int, default=-1, help='Layer index (default: -1)')
parser.add_argument("--sparsity_type", type=str, choices=["unstructured", "4:8", "2:4"])
parser.add_argument("--prune_method", type=str, choices=["magnitude", "wanda", "sparsegpt","gradient", "gblm"])
parser.add_argument("--cache_dir", default="llm_weights", type=str )
parser.add_argument('--use_variant', action="store_true", help="whether to use the wanda variant described in the appendix")
parser.add_argument('--save', type=str, default=None, help='Path to save results.')
parser.add_argument('--save_model', type=str, default=None, help='Path to save the pruned model.')
parser.add_argument('--grad_exponent', action='store_true', help='Use gradient of exponent')
parser.add_argument('--gradient_inv', action='store_true', help='Use inverse of gradient')
args = parser.parse_args()
print(f"Working on model: {args.model}")
print(f"working on method {args.prune_method}, grad norm {args.grad_norm}, gradient path {args.gradient_path}, inverse enabled {args.gradient_inv}, sparsity type {args.sparsity_type}, seq lenght {args.seq_length}")
# Setting seeds for reproducibility
np.random.seed(args.seed)
torch.random.manual_seed(args.seed)
# Handling n:m sparsity
prune_n, prune_m = 0, 0
if args.sparsity_type != "unstructured":
assert args.sparsity_ratio == 0.5, "sparsity ratio must be 0.5 for structured N:M sparsity"
prune_n, prune_m = map(int, args.sparsity_type.split(":"))
model_name = args.model.split("/")[-1]
print(f"loading llm model {args.model}")
model = get_llm(args.model, args.cache_dir)
model.eval()
tokenizer = LlamaTokenizer.from_pretrained(args.model, use_fast=False)
device = torch.device("cuda:0")
if "30b" in args.model or "65b" in args.model or "70b" in args.model:
device = model.hf_device_map["lm_head"]
print("use device ", device)
idx = args.layer_no
print(f"pruning for sparsity_ratio {args.sparsity_ratio} by method {args.prune_method}")
if args.sparsity_ratio != 0:
print("pruning starts")
if args.prune_method == "wanda":
prune_wanda(args, model, tokenizer, device, prune_n=prune_n, prune_m=prune_m, layer_no=idx)
elif args.prune_method == "gblm":
prune_gblm(args, model, tokenizer, device, prune_n=prune_n, prune_m=prune_m, layer_no=idx)
| elif args.prune_method == "magnitude": |
====REPOSITORY====
# Repo Name: zamaniamin/fastapi-shop
# Path: apps/accounts/models.py
class User(FastModel):
"""
User represents registered users in the application.
Attributes:
id (int): Unique identifier for the user.
email (str): User's email address used for authentication and communication.
password (str): Hashed password for user authentication.
first_name (str, optional): User's first name. Default is None.
last_name (str, optional): User's last name. Default is None.
is_verified_email (bool): Flag indicating whether the user's email address has been verified.
is_active (bool): Flag indicating whether the user's account is active.
is_superuser (bool): Flag indicating whether the user has superuser privileges.
role (str): User's role in the system, represented as a short string.
date_joined (datetime): Timestamp indicating when the user account was created.
updated_at (datetime, optional): Timestamp indicating when the user account was last updated. Default is None.
last_login (datetime, optional): Timestamp indicating the user's last login time. Default is None.
change (relationship): Relationship attribute linking this user to change requests initiated by the user.
"""
__tablename__ = "users"
id = Column(Integer, primary_key=True)
email = Column(String(256), nullable=False, unique=True)
password = Column(String, nullable=False)
first_name = Column(String(256), nullable=True)
last_name = Column(String(256), nullable=True)
is_verified_email = Column(Boolean, default=False)
is_active = Column(Boolean, default=False)
is_superuser = Column(Boolean, default=False)
# TODO add unittest and check the default role is 'user', also move role to permissions table
role = Column(String(5), default="user")
date_joined = Column(DateTime, server_default=func.now())
updated_at = Column(DateTime, nullable=True, onupdate=func.now())
last_login = Column(DateTime, nullable=True)
change = relationship("UserVerification", back_populates="user", cascade="all, delete-orphan")
# Path: apps/accounts/services/password.py
class PasswordManager:
password_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
min_length: int = 8
max_length: int = 24
@classmethod
def validate_password_strength(cls, password: str, has_number: bool = True, has_lowercase: bool = True,
has_uppercase: bool = True, has_special_char: bool = True) -> str:
"""
Validate a password based on the given constraints.
Args:
password: The password to validate.
has_number: Use numbers (0-9) in the password.
has_lowercase: Use lowercase characters (a-z) in the password.
has_uppercase: Use uppercase characters (A-Z) in the password.
has_special_char: Use special characters (!@#$%^&*()_+{}[]:;"\'<>.,.?/|) in the password.
Returns:
The validated password, or raises a HTTPException if the password is invalid.
"""
cls.__validate_length(password)
if has_number:
cls.__validate_pattern(password,
r'[0-9]', 'Invalid password. Must contain at least one number (0-9).')
if has_uppercase:
cls.__validate_pattern(password, r'[A-Z]',
'Invalid password. Must contain at least one uppercase letter (A-Z).')
if has_lowercase:
cls.__validate_pattern(password, r'[a-z]',
'Invalid password. Must contain at least one lowercase letter (a-z).')
if has_special_char:
cls.__validate_pattern(password, r'[!@#$%^&*()_+{}\[\]:;"\'<>,.?/\\|]',
'Invalid password. Must contain at least one special character.')
return password
@classmethod
def __validate_length(cls, password: str):
if len(password) < cls.min_length or len(password) > cls.max_length:
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail=f'Invalid password length. Must be between {cls.min_length} and {cls.max_length} characters.'
)
@classmethod
def __validate_pattern(cls, password: str, pattern: str, message: str):
if not re.search(pattern, password):
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail=message
)
# ---------------------
# --- Hash Password ---
# ---------------------
@classmethod
def hash_password(cls, password: str):
return cls.password_context.hash(password)
@classmethod
def verify_password(cls, plain_password: str, hashed_password: str):
return cls.password_context.verify(plain_password, hashed_password)
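# Added illustrative sketch (not part of the original file): validating, hashing and verifying
# a password with the manager above. The sample password is an arbitrary value chosen to
# satisfy every constraint.
_plain = PasswordManager.validate_password_strength("Str0ng!Pass")
_hashed = PasswordManager.hash_password(_plain)
assert PasswordManager.verify_password("Str0ng!Pass", _hashed)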
# Path: apps/accounts/services/token.py
class TokenService:
"""
Manage "jwt-token" or "otp-token" that used for authentication.
"""
user: User | None
user_id: int
app_config = AppConfig.get_config()
ALGORITHM = "HS256"
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="accounts/login")
credentials_exception = HTTPException(status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials.",
headers={"WWW-Authenticate": "Bearer"})
def __init__(self, user: int | User | None = None):
if user is not None:
if isinstance(user, User):
self.user = user
self.user_id = user.id
else:
self.user = None
self.user_id = user
# --------------------
# --- Access Token ---
# --------------------
"""
Utility class for handling JWT authentication and access tokens.
A user's access token will be expired due to actions such as "resetting the password," "changing the password," or
even "logging out" (logout mechanism).
The `access-token` stored in the database serves as a flag for the logout mechanism, ensuring that when a user
wants to log out of the system, the current token will no longer be valid.
"""
def create_access_token(self) -> str:
"""
Create a new access token for the provided user.
Returns:
str: Access token string.
"""
# --- set data to encode ---
to_encode = {'user_id': self.user_id}
# --- set expire date ---
to_encode.update({"exp": datetime.utcnow() + timedelta(self.app_config.access_token_expire_minutes)})
# --- generate access token ---
access_token = jwt.encode(to_encode, self.app_config.secret_key, algorithm=self.ALGORITHM)
self.update_access_token(access_token)
return access_token
def update_access_token(self, token: str):
UserVerification.update(UserVerification.filter(UserVerification.user_id == self.user_id).first().id,
active_access_token=token)
def reset_access_token(self):
UserVerification.update(UserVerification.filter(UserVerification.user_id == self.user_id).first().id,
active_access_token=None)
@classmethod
async def fetch_user(cls, token: str) -> User:
"""
Retrieve the user associated with the provided JWT token.
Args:
token (str): JWT token.
Returns:
User: User object if the token is valid, raises HTTPException if not.
"""
# --- validate token ---
try:
payload = jwt.decode(token, cls.app_config.secret_key, algorithms=[cls.ALGORITHM])
except JWTError as e:
raise cls.credentials_exception
# --- validate payloads in token ---
user_id = payload.get("user_id")
if user_id is None:
raise cls.credentials_exception
# --- get user ---
# TODO move user data to token and dont fetch them from database
user = UserManager.get_user(user_id)
if user is None:
raise cls.credentials_exception
UserManager.is_active(user)
# --- validate access token ---
active_access_token = UserVerification.filter(UserVerification.user_id == user_id).first().active_access_token
if token != active_access_token:
raise cls.credentials_exception
UserManager.is_active(user)
return user
# -----------------
# --- OTP Token ---
# -----------------
@classmethod
def create_otp_token(cls):
totp = TOTP(cls.app_config.otp_secret_key, interval=cls.app_config.otp_expire_seconds)
return totp.now()
def request_is_register(self):
"""
Will be used just when a new user is registered.
"""
UserVerification.create(user_id=self.user_id, request_type='register')
def get_new_email(self):
_change: UserVerification = UserVerification.filter(UserVerification.user_id == self.user_id).first()
if _change.request_type == 'change-email':
return _change.new_email
return False
def request_is_change_email(self, new_email: str):
_change = UserVerification.filter(UserVerification.user_id == self.user_id).first().id
UserVerification.update(_change, new_email=new_email, request_type='change-email')
def reset_is_change_email(self):
_change = UserVerification.filter(UserVerification.user_id == self.user_id).first().id
UserVerification.update(_change, new_email=None, request_type=None)
def reset_is_reset_password(self):
_change = UserVerification.filter(UserVerification.user_id == self.user_id).first().id
UserVerification.update(_change, request_type='reset-password')
def reset_otp_token_type(self):
"""
Remove the request_type for otp token by set it to None.
"""
_change = UserVerification.filter(UserVerification.user_id == self.user_id).first().id
UserVerification.update(_change, request_type=None)
def get_otp_request_type(self):
return UserVerification.filter(UserVerification.user_id == self.user_id).first().request_type
@classmethod
def validate_otp_token(cls, token: str):
totp = TOTP(cls.app_config.otp_secret_key, interval=cls.app_config.otp_expire_seconds)
return totp.verify(token)
@classmethod
def check_time_remaining(cls):
totp = TOTP(cls.app_config.otp_secret_key, interval=cls.app_config.otp_expire_seconds)
time_remaining = int(totp.interval - datetime.now().timestamp() % totp.interval)
if time_remaining != 0:
# OTP has not expired, do not resend
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"OTP not expired. Resend available in {time_remaining} seconds.")
# Path: apps/accounts/services/user.py
class UserManager:
@classmethod
def create_user(cls, email: str, password: str, first_name: str | None = None, last_name: str | None = None,
is_verified_email: bool = False, is_active: bool = False, is_superuser: bool = False,
role: str = 'user', updated_at: DateTime = None, last_login: DateTime = None):
user_data = {
"email": email,
"password": PasswordManager.hash_password(password),
"first_name": first_name,
"last_name": last_name,
"is_verified_email": is_verified_email,
"is_active": is_active,
"is_superuser": is_superuser,
"role": role,
"updated_at": updated_at,
"last_login": last_login
}
user = User.create(**user_data)
return user
@staticmethod
def get_user(user_id: int | None = None, email: str = None) -> User | None:
"""
Retrieve a user based on their ID or email address.
Args:
user_id (int | None): The ID of the user to retrieve. Defaults to None.
email (str | None): The email address of the user to retrieve. Defaults to None.
Returns:
User | None: A User object if a user is found based on the provided ID or email,
or None if no user is found.
"""
if user_id:
user = User.get(user_id)
elif email:
user = User.filter(User.email == email).first()
else:
return None
if user is None:
return None
return user
@staticmethod
def get_user_or_404(user_id: int | None = None, email: str = None):
user: User | None = None
if user_id:
user = User.get_or_404(user_id)
elif email:
user = User.filter(User.email == email).first()
if not user:
raise HTTPException(status_code=404, detail="User not found.")
return user
@classmethod
def update_user(cls, user_id: int, email: str | None = None, password: str | None = None,
first_name: str | None = None, last_name: str | None = None, is_verified_email: bool | None = None,
is_active: bool | None = None, is_superuser: bool | None = None, role: str | None = None,
last_login: DateTime | None = None):
"""
Update a user by their ID.
"""
user_data = {}
if first_name is not None:
user_data["first_name"] = first_name
if last_name is not None:
user_data["last_name"] = last_name
if email is not None:
user_data["email"] = email
if password is not None:
user_data["password"] = PasswordManager.hash_password(password)
if is_verified_email is not None:
user_data["is_verified_email"] = is_verified_email
if is_active is not None:
user_data["is_active"] = is_active
if is_superuser is not None:
user_data["is_superuser"] = is_superuser
if role is not None:
user_data["role"] = role
if last_login is not None:
user_data["last_login"] = last_login
return User.update(user_id, **user_data)
@classmethod
def update_last_login(cls, user_id: int):
"""
Update user's last login.
"""
User.update(user_id, last_login=DateTime.now())
@staticmethod
def to_dict(user: User):
"""
Convert a User object to a dictionary.
"""
_dict = {
'user_id': user.id,
'email': user.email,
'first_name': user.first_name,
'last_name': user.last_name,
'is_verified_email': user.is_verified_email,
'date_joined': DateTime.string(user.date_joined),
'updated_at': DateTime.string(user.updated_at),
'last_login': DateTime.string(user.last_login)
}
return _dict
@classmethod
def new_user(cls, **user_data):
return User.create(**user_data)
@staticmethod
def is_active(user: User):
if not user.is_active:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Inactive user.")
@staticmethod
def is_verified_email(user: User):
if not user.is_verified_email:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
detail="Pleas verify your email address to continue.")
# TODO guide user to follow the steps need to verify email address.
# Path: apps/core/date_time.py
class DateTime:
@classmethod
def string(cls, obj: datetime):
"""
Convert a datetime object to a formatted string.
This method takes a datetime object `obj` and converts it into a string
in the format 'YYYY-MM-DD HH:MM:SS'. If `obj` is None or evaluates to False,
it returns None.
Parameters:
cls (object): An instance of the class (although this argument is not used).
obj (datetime or None): The datetime object to be converted to a string.
Returns:
str or None: A formatted string representation of the datetime object,
or None if the input is None or evaluates to False.
"""
return obj.strftime('%Y-%m-%d %H:%M:%S') if obj else None
@classmethod
def now(cls):
return datetime.strptime(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')
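# Added tiny usage sketch (not part of the original file): DateTime.string formats a datetime
# as 'YYYY-MM-DD HH:MM:SS' and maps None to None.
from datetime import datetime
assert DateTime.string(None) is None
_formatted = DateTime.string(datetime(2024, 1, 2, 3, 4, 5))  # '2024-01-02 03:04:05'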
# Path: apps/core/services/email_manager.py
class EmailService:
config = EmailServiceConfig.get_config()
app = AppConfig.get_config()
@classmethod
def __send_email(cls, subject: str, body: str, to_address: str):
try:
message = MIMEMultipart()
message['From'] = cls.config.smtp_username
message['To'] = to_address
message['Subject'] = subject
message.attach(MIMEText(body, 'plain'))
# Connect to the SMTP server
with smtplib.SMTP_SSL(cls.config.smtp_server, cls.config.smtp_port) as server:
# server.set_debuglevel(1)
server.login(cls.config.smtp_username, cls.config.smtp_password)
server.sendmail(cls.config.smtp_username, to_address, message.as_string())
# server.quit()
except Exception as e:
print(f"An error occurred while sending email: {e}")
@classmethod
def __print_test_otp(cls, otp: str):
dev_show = f"--- Testing OTP: {otp} ---"
print(dev_show)
@classmethod
def __send_verification_email(cls, subject, body, to_address):
"""
Sends a verification email or prints OTP in testing mode.
"""
if is_running() or cls.config.use_local_fallback:
cls.__print_test_otp(TokenService.create_otp_token())
else:
cls.__send_email(subject, body, to_address)
@classmethod
def register_send_verification_email(cls, to_address):
"""
Sends a verification email for the registration process.
"""
otp = TokenService.create_otp_token()
subject = 'Email Verification'
body = f"Thank you for registering with {cls.app.app_name}!\n\n" \
f"To complete your registration, please enter the following code: {otp}\n\n" \
f"If you didn't register, please ignore this email."
cls.__send_verification_email(subject, body, to_address)
@classmethod
def reset_password_send_verification_email(cls, to_address):
"""
Sends a verification email for the password reset process.
"""
otp = TokenService.create_otp_token()
subject = 'Password Reset Verification'
body = f"We received a request to reset your {cls.app.app_name} password.\n\n" \
f"Please enter the following code to reset your password: {otp}\n\n" \
f"If you didn't request this, you can ignore this email."
cls.__send_verification_email(subject, body, to_address)
@classmethod
def change_email_send_verification_email(cls, new_email: str):
"""
Sends a verification email for the email change process.
"""
otp = TokenService.create_otp_token()
subject = 'Email Change Verification'
body = f"We received a request to change the email associated with your {cls.app.app_name} account.\n\n" \
f"To confirm this change, please enter the following code: {otp}\n\n" \
f"If you didn't request this, please contact our support team."
cls.__send_verification_email(subject, body, new_email)
# Path: apps/accounts/services/authenticate.py
from fastapi import HTTPException, status, Depends
from fastapi.security import OAuth2PasswordBearer
from apps.accounts.models import User
from apps.accounts.services.password import PasswordManager
from apps.accounts.services.token import TokenService
from apps.accounts.services.user import UserManager
from apps.core.date_time import DateTime
from apps.core.services.email_manager import EmailService
class AccountService:
@classmethod
async def current_user(cls, token: str = Depends(OAuth2PasswordBearer(tokenUrl="accounts/login"))) -> User:
user = await TokenService.fetch_user(token)
| return user |
====REPOSITORY====
# Repo Name: jkulhanek/nerfbaselines
# Path: nerfbaselines/metrics.py
@_wrap_metric_arbitrary_shape
def torchmetrics_ssim(
a: np.ndarray,
b: np.ndarray,
*,
gaussian_kernel: bool = True,
sigma: Union[float, Sequence[float]] = 1.5,
kernel_size: Union[int, Sequence[int]] = 11,
data_range: Optional[Union[float, Tuple[float, float]]] = None,
k1: float = 0.01,
k2: float = 0.03,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""Compute Structural Similarity Index Measure.
NOTE: this metric exactly matches torchmetrics.ssim
Args:
preds: estimated image
target: ground truth image
gaussian_kernel: If true (default), a gaussian kernel is used, if false a uniform kernel is used
sigma: Standard deviation of the gaussian kernel, anisotropic kernels are possible.
Ignored if a uniform kernel is used
kernel_size: the size of the uniform kernel, anisotropic kernels are possible.
Ignored if a Gaussian kernel is used
data_range: Range of the image. If ``None``, it is determined from the image (max - min)
k1: Parameter of SSIM.
k2: Parameter of SSIM.
"""
assert a.ndim == b.ndim and a.ndim == 4, f"Expected preds and target to have dimension less than 5, got {a.ndim} and {b.ndim}"
a = np.transpose(a, (0, 3, 1, 2))
b = np.transpose(b, (0, 3, 1, 2))
def conv2d(a, f):
shape = a.shape
a = np.reshape(a, (-1, a.shape[-2], a.shape[-1]))
def conv2d_single(a, f):
s = f.shape + tuple(np.subtract(a.shape, f.shape) + 1)
strd = numpy.lib.stride_tricks.as_strided
subM = strd(a, shape=s, strides=a.strides * 2)
return np.einsum("ij,ijkl->kl", f, subM)
out = np.stack([conv2d_single(a[i], f) for i in range(len(a))])
return np.reshape(out, shape[:-2] + out.shape[-2:])
if not isinstance(kernel_size, Sequence):
kernel_size = 2 * [kernel_size]
if not isinstance(sigma, Sequence):
sigma = 2 * [sigma]
if len(kernel_size) != len(b.shape) - 2:
raise ValueError(f"`kernel_size` has dimension {len(kernel_size)}, but expected to be two less that target dimensionality," f" which is: {len(b.shape)}")
if len(kernel_size) not in (2, 3):
raise ValueError(f"Expected `kernel_size` dimension to be 2 or 3. `kernel_size` dimensionality: {len(kernel_size)}")
if len(sigma) != len(b.shape) - 2:
raise ValueError(f"`kernel_size` has dimension {len(kernel_size)}, but expected to be two less that target dimensionality," f" which is: {len(b.shape)}")
if len(sigma) not in (2, 3):
raise ValueError(f"Expected `kernel_size` dimension to be 2 or 3. `kernel_size` dimensionality: {len(kernel_size)}")
if any(x % 2 == 0 or x <= 0 for x in kernel_size):
raise ValueError(f"Expected `kernel_size` to have odd positive number. Got {kernel_size}.")
if any(y <= 0 for y in sigma):
raise ValueError(f"Expected `sigma` to have positive number. Got {sigma}.")
if data_range is None:
data_range = max(a.max() - a.min(), b.max() - b.min())
elif isinstance(data_range, tuple):
a = np.clip(a, data_range[0], data_range[1])
b = np.clip(b, data_range[0], data_range[1])
data_range = data_range[1] - data_range[0]
assert isinstance(data_range, float), f"Expected data_range to be float, got {type(data_range)}"
c1 = pow(k1 * data_range, 2)
c2 = pow(k2 * data_range, 2)
dtype = a.dtype
gauss_kernel_size = [int(3.5 * s + 0.5) * 2 + 1 for s in sigma]
pad_h = (gauss_kernel_size[0] - 1) // 2
pad_w = (gauss_kernel_size[1] - 1) // 2
a = np.pad(a, ((0, 0), (0, 0), (pad_w, pad_w), (pad_h, pad_h)), mode="reflect")
b = np.pad(b, ((0, 0), (0, 0), (pad_w, pad_w), (pad_h, pad_h)), mode="reflect")
if gaussian_kernel:
kernel = _gaussian_kernel_2d(gauss_kernel_size, sigma, dtype)
if not gaussian_kernel:
kernel = np.ones(kernel_size, dtype=dtype) / np.prod(np.array(kernel_size, dtype=dtype))
input_list = np.concatenate((a, b, a * a, b * b, a * b)) # (5 * B, C, H, W)
outputs: np.ndarray = conv2d(input_list, kernel)
output_list = np.split(outputs, 5)
mu_pred_sq = np.power(output_list[0], 2)
mu_target_sq = np.power(output_list[1], 2)
mu_pred_target = output_list[0] * output_list[1]
sigma_pred_sq: np.ndarray = output_list[2] - mu_pred_sq
sigma_target_sq: np.ndarray = output_list[3] - mu_target_sq
sigma_pred_target: np.ndarray = output_list[4] - mu_pred_target
upper = 2 * sigma_pred_target.astype(dtype) + c2
lower = (sigma_pred_sq + sigma_target_sq).astype(dtype) + c2
ssim_idx_full_image = ((2 * mu_pred_target + c1) * upper) / ((mu_pred_sq + mu_target_sq + c1) * lower)
ssim_idx: np.ndarray = ssim_idx_full_image[..., pad_h:-pad_h, pad_w:-pad_w]
return np.reshape(ssim_idx, (ssim_idx.shape[0], -1)).mean(-1)
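# Added sanity sketch (not part of the original file): an image compared with itself yields an
# SSIM of 1.0 for any valid data_range, which is a quick check of the implementation above.
import numpy as np
_img = np.random.rand(1, 32, 32, 3).astype(np.float32)
_score = torchmetrics_ssim(_img, _img, data_range=(0.0, 1.0))  # array of shape (1,), ~1.0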
# Path: nerfbaselines/metrics.py
@_wrap_metric_arbitrary_shape
def dmpix_ssim(
a: np.ndarray,
b: np.ndarray,
*,
max_val: float = 1.0,
kernel_size: int = 11,
sigma: float = 1.5,
k1: float = 0.01,
k2: float = 0.03,
return_map: bool = False,
filter_fn: Optional[Callable[[np.ndarray], np.ndarray]] = None,
) -> np.ndarray:
"""Computes the structural similarity index (SSIM) between image pairs.
This function is based on the standard SSIM implementation from:
Z. Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli,
"Image quality assessment: from error visibility to structural similarity",
in IEEE Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, 2004.
This function was modeled after tf.image.ssim, and should produce comparable
output.
Note: the true SSIM is only defined on grayscale. This function does not
perform any colorspace transform. If the input is in a color space, then it
will compute the average SSIM.
NOTE: This function exactly matches dm_pix.ssim
Args:
a: First image (or set of images).
b: Second image (or set of images).
max_val: The maximum magnitude that `a` or `b` can have.
kernel_size: Window size (>= 1). Image dims must be at least this small.
sigma: The bandwidth of the Gaussian used for filtering (> 0.).
k1: One of the SSIM dampening parameters (> 0.).
k2: One of the SSIM dampening parameters (> 0.).
return_map: If True, will cause the per-pixel SSIM "map" to be returned.
precision: The numerical precision to use when performing convolution.
Returns:
Each image's mean SSIM, or a tensor of individual values if `return_map`.
"""
# DO NOT REMOVE - Logging usage.
assert a.shape == b.shape, f"Images must have the same shape, got {a.shape} and {b.shape}"
assert a.dtype.kind == "f" and b.dtype.kind == "f", f"Expected floating point inputs, got {a.dtype} and {b.dtype}"
if filter_fn is None:
# Construct a 1D Gaussian blur filter.
hw = kernel_size // 2
shift = (2 * hw - kernel_size + 1) / 2
f_i = ((np.arange(kernel_size) - hw + shift) / sigma) ** 2
filt = np.exp(-0.5 * f_i)
filt /= np.sum(filt)
# Construct a 1D convolution.
def filter_fn_1(z):
return np.convolve(z, filt, mode="valid")
# jax.vmap(filter_fn_1)
filter_fn_vmap = lambda x: np.stack([filter_fn_1(y) for y in x], 0) # noqa: E731
# Apply the vectorized filter along the y axis.
def filter_fn_y(z):
z_flat = np.moveaxis(z, -3, -1).reshape((-1, z.shape[-3]))
z_filtered_shape = ((z.shape[-4],) if z.ndim == 4 else ()) + (
z.shape[-2],
z.shape[-1],
-1,
)
z_filtered = np.moveaxis(filter_fn_vmap(z_flat).reshape(z_filtered_shape), -1, -3)
return z_filtered
# Apply the vectorized filter along the x axis.
def filter_fn_x(z):
z_flat = np.moveaxis(z, -2, -1).reshape((-1, z.shape[-2]))
z_filtered_shape = ((z.shape[-4],) if z.ndim == 4 else ()) + (
z.shape[-3],
z.shape[-1],
-1,
)
z_filtered = np.moveaxis(filter_fn_vmap(z_flat).reshape(z_filtered_shape), -1, -2)
return z_filtered
# Apply the blur in both x and y.
filter_fn = lambda z: filter_fn_y(filter_fn_x(z)) # noqa: E731
mu0 = filter_fn(a)
mu1 = filter_fn(b)
mu00 = mu0 * mu0
mu11 = mu1 * mu1
mu01 = mu0 * mu1
sigma00 = filter_fn(a**2) - mu00
sigma11 = filter_fn(b**2) - mu11
sigma01 = filter_fn(a * b) - mu01
# Clip the variances and covariances to valid values.
# Variance must be non-negative:
epsilon = np.finfo(np.float32).eps ** 2
sigma00 = np.maximum(epsilon, sigma00)
sigma11 = np.maximum(epsilon, sigma11)
sigma01 = np.sign(sigma01) * np.minimum(np.sqrt(sigma00 * sigma11), np.abs(sigma01))
c1 = (k1 * max_val) ** 2
c2 = (k2 * max_val) ** 2
numer = (2 * mu01 + c1) * (2 * sigma01 + c2)
denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2)
ssim_map = numer / denom
ssim_value = np.mean(ssim_map, tuple(range(-3, 0)))
return ssim_map if return_map else ssim_value
# Path: nerfbaselines/metrics.py
def _wrap_metric_arbitrary_shape(fn):
def wrapped(a, b, **kwargs):
def dmpix_ssim(
a: np.ndarray,
b: np.ndarray,
*,
max_val: float = 1.0,
kernel_size: int = 11,
sigma: float = 1.5,
k1: float = 0.01,
k2: float = 0.03,
return_map: bool = False,
filter_fn: Optional[Callable[[np.ndarray], np.ndarray]] = None,
) -> np.ndarray:
def filter_fn_1(z):
def filter_fn_y(z):
def filter_fn_x(z):
def _gaussian(kernel_size: int, sigma: float, dtype: np.dtype) -> np.ndarray:
def _gaussian_kernel_2d(
kernel_size: Sequence[int],
sigma: Sequence[float],
dtype: np.dtype,
) -> np.ndarray:
def torchmetrics_ssim(
a: np.ndarray,
b: np.ndarray,
*,
gaussian_kernel: bool = True,
sigma: Union[float, Sequence[float]] = 1.5,
kernel_size: Union[int, Sequence[int]] = 11,
data_range: Optional[Union[float, Tuple[float, float]]] = None,
k1: float = 0.01,
k2: float = 0.03,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
def conv2d(a, f):
def conv2d_single(a, f):
def _mean(metric):
def _normalize_input(a):
def ssim(a: np.ndarray, b: np.ndarray) -> Union[np.ndarray, np.float32]:
def mse(a: np.ndarray, b: np.ndarray) -> Union[np.ndarray, np.float32]:
def mae(a: np.ndarray, b: np.ndarray) -> Union[np.ndarray, np.float32]:
def psnr(a: Union[np.ndarray, np.float32, np.float64], b: Optional[np.ndarray] = None) -> Union[np.ndarray, np.float32, np.float64]:
def _lpips(a, b, net, version="0.1"):
def lpips_alex(a: np.ndarray, b: np.ndarray) -> Union[np.ndarray, np.float32]:
def lpips_vgg(a: np.ndarray, b: np.ndarray) -> Union[np.ndarray, np.float32]:
_LPIPS_CACHE = {}
_LPIPS_GPU_AVAILABLE = None
_LPIPS_GPU_AVAILABLE = torch.cuda.is_available()
_LPIPS_GPU_AVAILABLE = False
# Path: tests/test_metrics.py
import numpy as np
import pytest
import torch
import torchmetrics
import dm_pix
import dm_pix
from nerfbaselines.metrics import torchmetrics_ssim, dmpix_ssim
from nerfbaselines import metrics
@pytest.mark.extras
@pytest.mark.filterwarnings("ignore::UserWarning:torchvision")
@pytest.mark.parametrize("kernel_size", [None, 3])
@pytest.mark.parametrize("sigma", [None, 0.5])
def test_torchmetrics_ssim(kernel_size, sigma):
np.random.seed(42)
a = np.random.rand(3, 47, 41, 3) * 0.9 + 0.05
b = np.random.rand(3, 47, 41, 3) * 0.9 + 0.05
kwargs = {}
if kernel_size is not None:
kwargs["kernel_size"] = kernel_size
if sigma is not None:
kwargs["sigma"] = sigma
a_torch = torch.from_numpy(a).permute(0, 3, 1, 2)
b_torch = torch.from_numpy(b).permute(0, 3, 1, 2)
reference_ssim = torchmetrics.functional.structural_similarity_index_measure(a_torch, b_torch, reduction="none", data_range=(0.0, 1.0), **kwargs).numpy()
ssim = torchmetrics_ssim(a, b, data_range=(0.0, 1.0), **kwargs)
assert isinstance(ssim, np.ndarray)
assert ssim.shape == (3,)
np.testing.assert_allclose(ssim, reference_ssim, atol=1e-5, rtol=0)
if kernel_size is None and sigma is None:
# SSIM matches for default parameters
reference2 = dm_pix.ssim(a, b, **kwargs)
np.testing.assert_allclose(ssim, reference2, atol=1e-5, rtol=0)
@pytest.mark.extras
@pytest.mark.filterwarnings("ignore::UserWarning:torchvision")
@pytest.mark.parametrize("kernel_size", [None, 3])
@pytest.mark.parametrize("sigma", [None, 0.5])
def test_dmpix_ssim(kernel_size, sigma):
np.random.seed(42)
a = np.random.rand(3, 47, 41, 3) * 0.9 + 0.05
b = np.random.rand(3, 47, 41, 3) * 0.9 + 0.05
kwargs = {}
kwargs_dmpix = {}
if kernel_size is not None:
kwargs["kernel_size"] = kernel_size
kwargs_dmpix["filter_size"] = kernel_size
if sigma is not None:
kwargs["sigma"] = sigma
kwargs_dmpix["filter_sigma"] = sigma
reference_ssim = dm_pix.ssim(a, b, **kwargs_dmpix)
ssim = dmpix_ssim(a, b, **kwargs)
assert isinstance(ssim, np.ndarray)
assert ssim.shape == (3,)
np.testing.assert_allclose(ssim, reference_ssim, atol=1e-5, rtol=0)
@pytest.mark.filterwarnings("ignore::UserWarning:torchvision")
@pytest.mark.parametrize("metric", ["torchmetrics_ssim", "dmpix_ssim", "ssim", "mse", "mae", "psnr"])
def test_metric(metric):
np.random.seed(42)
batch_shapes = [
(3,),
(2, 2),
(
2,
1,
1,
),
]
for bs in batch_shapes:
a = np.random.rand(*bs, 47, 31, 3)
b = np.random.rand(*bs, 47, 31, 3)
val = getattr(metrics, metric)(a, b)
assert isinstance(val, np.ndarray)
assert val.shape == bs
# Different shape raises error
with pytest.raises(Exception):
getattr(metrics, metric)(a, b[:-1])
def test_psnr():
np.random.seed(42)
batch_shapes = [
(3,),
(2, 2),
(
2,
1,
| 1, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: UMass-Foundation-Model/CoVLM
# Path: GroundingDINO/groundingdino/models/GroundingDINO/fuse_modules.py
class BiAttentionBlock(nn.Module):
def __init__(
self,
v_dim,
l_dim,
embed_dim,
num_heads,
dropout=0.1,
drop_path=0.0,
init_values=1e-4,
cfg=None,
):
"""
Inputs:
embed_dim - Dimensionality of input and attention feature vectors
hidden_dim - Dimensionality of hidden layer in feed-forward network
(usually 2-4x larger than embed_dim)
num_heads - Number of heads to use in the Multi-Head Attention block
dropout - Amount of dropout to apply in the feed-forward network
"""
super(BiAttentionBlock, self).__init__()
# pre layer norm
self.layer_norm_v = nn.LayerNorm(v_dim)
self.layer_norm_l = nn.LayerNorm(l_dim)
self.attn = BiMultiHeadAttention(
v_dim=v_dim, l_dim=l_dim, embed_dim=embed_dim, num_heads=num_heads, dropout=dropout
)
# add layer scale for training stability
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.gamma_v = nn.Parameter(init_values * torch.ones((v_dim)), requires_grad=True)
self.gamma_l = nn.Parameter(init_values * torch.ones((l_dim)), requires_grad=True)
def forward(self, v, l, attention_mask_v=None, attention_mask_l=None):
v = self.layer_norm_v(v)
l = self.layer_norm_l(l)
delta_v, delta_l = self.attn(
v, l, attention_mask_v=attention_mask_v, attention_mask_l=attention_mask_l
)
# v, l = v + delta_v, l + delta_l
v = v + self.drop_path(self.gamma_v * delta_v)
l = l + self.drop_path(self.gamma_l * delta_l)
return v, l
# def forward(self, v:List[torch.Tensor], l, attention_mask_v=None, attention_mask_l=None)
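# A minimal usage sketch for BiAttentionBlock (illustrative, not part of the quoted repo).
# The batch-first shapes, v as (bs, n_visual_tokens, v_dim) and l as (bs, n_text_tokens, l_dim),
# and the import path (taken from the file path quoted above) are assumptions.
import torch
from groundingdino.models.GroundingDINO.fuse_modules import BiAttentionBlock
block = BiAttentionBlock(v_dim=256, l_dim=256, embed_dim=1024, num_heads=4)
v = torch.randn(2, 900, 256)  # visual tokens
l = torch.randn(2, 195, 256)  # text tokens
v_out, l_out = block(v, l)    # both outputs keep their input shapes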
# Path: GroundingDINO/groundingdino/models/GroundingDINO/ms_deform_attn.py
class MultiScaleDeformableAttention(nn.Module):
"""Multi-Scale Deformable Attention Module used in Deformable-DETR
`Deformable DETR: Deformable Transformers for End-to-End Object Detection.
<https://arxiv.org/pdf/2010.04159.pdf>`_.
Args:
embed_dim (int): The embedding dimension of Attention. Default: 256.
num_heads (int): The number of attention heads. Default: 8.
num_levels (int): The number of feature map used in Attention. Default: 4.
num_points (int): The number of sampling points for each query
in each head. Default: 4.
img2col_step (int): The step used in image_to_column. Default: 64.
dropout (float): Dropout layer used in output. Default: 0.1.
batch_first (bool): if ``True``, then the input and output tensor will be
provided as `(bs, n, embed_dim)`; otherwise as `(n, bs, embed_dim)`. Default: False.
"""
def __init__(
self,
embed_dim: int = 256,
num_heads: int = 8,
num_levels: int = 4,
num_points: int = 4,
img2col_step: int = 64,
batch_first: bool = False,
):
super().__init__()
if embed_dim % num_heads != 0:
raise ValueError(
"embed_dim must be divisible by num_heads, but got {} and {}".format(
embed_dim, num_heads
)
)
head_dim = embed_dim // num_heads
self.batch_first = batch_first
if not _is_power_of_2(head_dim):
warnings.warn(
"""
You'd better set d_model in MSDeformAttn to make sure that
each dim of the attention head is a power of 2, which is more efficient.
"""
)
self.im2col_step = img2col_step
self.embed_dim = embed_dim
self.num_heads = num_heads
self.num_levels = num_levels
self.num_points = num_points
self.sampling_offsets = nn.Linear(embed_dim, num_heads * num_levels * num_points * 2)
self.attention_weights = nn.Linear(embed_dim, num_heads * num_levels * num_points)
self.value_proj = nn.Linear(embed_dim, embed_dim)
self.output_proj = nn.Linear(embed_dim, embed_dim)
self.init_weights()
def _reset_parameters(self):
return self.init_weights()
def init_weights(self):
"""
Default initialization for Parameters of Module.
"""
constant_(self.sampling_offsets.weight.data, 0.0)
thetas = torch.arange(self.num_heads, dtype=torch.float32) * (
2.0 * math.pi / self.num_heads
)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (
(grid_init / grid_init.abs().max(-1, keepdim=True)[0])
.view(self.num_heads, 1, 1, 2)
.repeat(1, self.num_levels, self.num_points, 1)
)
for i in range(self.num_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
constant_(self.attention_weights.weight.data, 0.0)
constant_(self.attention_weights.bias.data, 0.0)
xavier_uniform_(self.value_proj.weight.data)
constant_(self.value_proj.bias.data, 0.0)
xavier_uniform_(self.output_proj.weight.data)
constant_(self.output_proj.bias.data, 0.0)
def freeze_sampling_offsets(self):
print("Freeze sampling offsets")
self.sampling_offsets.weight.requires_grad = False
self.sampling_offsets.bias.requires_grad = False
def freeze_attention_weights(self):
print("Freeze attention weights")
self.attention_weights.weight.requires_grad = False
self.attention_weights.bias.requires_grad = False
def forward(
self,
query: torch.Tensor,
key: Optional[torch.Tensor] = None,
value: Optional[torch.Tensor] = None,
query_pos: Optional[torch.Tensor] = None,
key_padding_mask: Optional[torch.Tensor] = None,
reference_points: Optional[torch.Tensor] = None,
spatial_shapes: Optional[torch.Tensor] = None,
level_start_index: Optional[torch.Tensor] = None,
**kwargs
) -> torch.Tensor:
"""Forward Function of MultiScaleDeformableAttention
Args:
query (torch.Tensor): Query embeddings with shape
`(num_query, bs, embed_dim)`
key (torch.Tensor): Key embeddings with shape
`(num_key, bs, embed_dim)`
value (torch.Tensor): Value embeddings with shape
`(num_key, bs, embed_dim)`
query_pos (torch.Tensor): The position embedding for `query`. Default: None.
key_padding_mask (torch.Tensor): ByteTensor for `query`, with shape `(bs, num_key)`,
indicating which elements within `key` to be ignored in attention.
reference_points (torch.Tensor): The normalized reference points
with shape `(bs, num_query, num_levels, 2)`,
all elements is range in [0, 1], top-left (0, 0),
bottom-right (1, 1), including the padding area.
or `(N, Length_{query}, num_levels, 4)`, add additional
two dimensions `(h, w)` to form reference boxes.
spatial_shapes (torch.Tensor): Spatial shape of features in different levels.
With shape `(num_levels, 2)`, last dimension represents `(h, w)`.
level_start_index (torch.Tensor): The start index of each level. A tensor with
shape `(num_levels, )` which can be represented as
`[0, h_0 * w_0, h_0 * w_0 + h_1 * w_1, ...]`.
Returns:
torch.Tensor: forward results with shape `(num_query, bs, embed_dim)`
"""
if value is None:
value = query
if query_pos is not None:
query = query + query_pos
if not self.batch_first:
# change to (bs, num_query ,embed_dims)
query = query.permute(1, 0, 2)
value = value.permute(1, 0, 2)
bs, num_query, _ = query.shape
bs, num_value, _ = value.shape
assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value
value = self.value_proj(value)
if key_padding_mask is not None:
value = value.masked_fill(key_padding_mask[..., None], float(0))
value = value.view(bs, num_value, self.num_heads, -1)
sampling_offsets = self.sampling_offsets(query).view(
bs, num_query, self.num_heads, self.num_levels, self.num_points, 2
)
attention_weights = self.attention_weights(query).view(
bs, num_query, self.num_heads, self.num_levels * self.num_points
)
attention_weights = attention_weights.softmax(-1)
attention_weights = attention_weights.view(
bs,
num_query,
self.num_heads,
self.num_levels,
self.num_points,
)
# bs, num_query, num_heads, num_levels, num_points, 2
if reference_points.shape[-1] == 2:
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
sampling_locations = (
reference_points[:, :, None, :, None, :]
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
)
elif reference_points.shape[-1] == 4:
sampling_locations = (
reference_points[:, :, None, :, None, :2]
+ sampling_offsets
/ self.num_points
* reference_points[:, :, None, :, None, 2:]
* 0.5
)
else:
raise ValueError(
"Last dim of reference_points must be 2 or 4, but get {} instead.".format(
reference_points.shape[-1]
)
)
if torch.cuda.is_available() and value.is_cuda:
halffloat = False
if value.dtype == torch.float16:
halffloat = True
value = value.float()
sampling_locations = sampling_locations.float()
attention_weights = attention_weights.float()
output = MultiScaleDeformableAttnFunction.apply(
value,
spatial_shapes,
level_start_index,
sampling_locations,
attention_weights,
self.im2col_step,
)
if halffloat:
output = output.half()
else:
output = multi_scale_deformable_attn_pytorch(
value, spatial_shapes, sampling_locations, attention_weights
)
output = self.output_proj(output)
if not self.batch_first:
output = output.permute(1, 0, 2)
return output
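# A minimal CPU usage sketch for MultiScaleDeformableAttention (illustrative, not from the repo;
# the import path is assumed from the file path quoted above). With the default
# batch_first=False, query is (num_query, bs, embed_dim), value is (sum of h*w over levels, bs,
# embed_dim), and reference_points are normalized to [0, 1]; on CPU the pure-PyTorch fallback is used.
import torch
from groundingdino.models.GroundingDINO.ms_deform_attn import MultiScaleDeformableAttention
attn = MultiScaleDeformableAttention(embed_dim=256, num_heads=8, num_levels=1, num_points=4)
bs, num_query, h, w = 2, 100, 32, 32
query = torch.randn(num_query, bs, 256)
value = torch.randn(h * w, bs, 256)
spatial_shapes = torch.tensor([[h, w]], dtype=torch.long)
level_start_index = torch.tensor([0], dtype=torch.long)
reference_points = torch.rand(bs, num_query, 1, 2)  # normalized (x, y)
out = attn(query, value=value, reference_points=reference_points,
           spatial_shapes=spatial_shapes, level_start_index=level_start_index)
print(out.shape)  # torch.Size([100, 2, 256])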
# Path: GroundingDINO/groundingdino/models/GroundingDINO/transformer_vanilla.py
class TransformerEncoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self.nhead = nhead
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward(
self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
# repeat attn mask
if src_mask.dim() == 3 and src_mask.shape[0] == src.shape[1]:
# bs, num_q, num_k
src_mask = src_mask.repeat(self.nhead, 1, 1)
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(q, k, value=src, attn_mask=src_mask)[0]
# src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
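# A minimal usage sketch for the vanilla encoder layer above (illustrative; the import path is
# assumed from the file path quoted above). Note that forward() dereferences src_mask, so an
# explicit attention mask must be passed; shapes follow the nn.MultiheadAttention default of
# (seq_len, bs, d_model).
import torch
from groundingdino.models.GroundingDINO.transformer_vanilla import TransformerEncoderLayer
layer = TransformerEncoderLayer(d_model=256, nhead=8, dim_feedforward=1024)
src = torch.randn(20, 2, 256)
src_mask = torch.zeros(20, 20, dtype=torch.bool)  # all-False mask: attend everywhere
out = layer(src, src_mask=src_mask)
print(out.shape)  # torch.Size([20, 2, 256])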
# Path: GroundingDINO/groundingdino/models/GroundingDINO/utils.py
class MLP(nn.Module):
"""Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
)
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
# Path: GroundingDINO/groundingdino/models/GroundingDINO/utils.py
def _get_activation_fn(activation, d_model=256, batch_dim=0):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
if activation == "prelu":
return nn.PReLU()
if activation == "selu":
return F.selu
raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
# Path: GroundingDINO/groundingdino/models/GroundingDINO/utils.py
def _get_clones(module, N, layer_share=False):
# import ipdb; ipdb.set_trace()
if layer_share:
return nn.ModuleList([module for i in range(N)])
else:
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
# Path: GroundingDINO/groundingdino/models/GroundingDINO/utils.py
def gen_encoder_output_proposals(
memory: Tensor, memory_padding_mask: Tensor, spatial_shapes: Tensor, learnedwh=None
):
"""
Input:
- memory: bs, \sum{hw}, d_model
- memory_padding_mask: bs, \sum{hw}
- spatial_shapes: nlevel, 2
- learnedwh: 2
Output:
- output_memory: bs, \sum{hw}, d_model
- output_proposals: bs, \sum{hw}, 4
"""
N_, S_, C_ = memory.shape
proposals = []
_cur = 0
for lvl, (H_, W_) in enumerate(spatial_shapes):
mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(N_, H_, W_, 1)
valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
# import ipdb; ipdb.set_trace()
grid_y, grid_x = torch.meshgrid(
torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),
torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device),
)
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) # H_, W_, 2
scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)
grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
if learnedwh is not None:
# import ipdb; ipdb.set_trace()
wh = torch.ones_like(grid) * learnedwh.sigmoid() * (2.0**lvl)
else:
wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)
# scale = torch.cat([W_[None].unsqueeze(-1), H_[None].unsqueeze(-1)], 1).view(1, 1, 1, 2).repeat(N_, 1, 1, 1)
# grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
# wh = torch.ones_like(grid) / scale
proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)
proposals.append(proposal)
_cur += H_ * W_
# import ipdb; ipdb.set_trace()
output_proposals = torch.cat(proposals, 1)
output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(
-1, keepdim=True
)
output_proposals = torch.log(output_proposals / (1 - output_proposals)) # unsigmoid
output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float("inf"))
output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf"))
output_memory = memory
output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))
output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))
# output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))
# output_memory = output_memory.masked_fill(~output_proposals_valid, float('inf'))
return output_memory, output_proposals
# Path: GroundingDINO/groundingdino/models/GroundingDINO/utils.py
def gen_sineembed_for_position(pos_tensor):
# n_query, bs, _ = pos_tensor.size()
# sineembed_tensor = torch.zeros(n_query, bs, 256)
scale = 2 * math.pi
dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device)
dim_t = 10000 ** (2 * (torch.div(dim_t, 2, rounding_mode='floor')) / 128)
x_embed = pos_tensor[:, :, 0] * scale
y_embed = pos_tensor[:, :, 1] * scale
pos_x = x_embed[:, :, None] / dim_t
pos_y = y_embed[:, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)
pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)
if pos_tensor.size(-1) == 2:
pos = torch.cat((pos_y, pos_x), dim=2)
elif pos_tensor.size(-1) == 4:
w_embed = pos_tensor[:, :, 2] * scale
pos_w = w_embed[:, :, None] / dim_t
pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2)
h_embed = pos_tensor[:, :, 3] * scale
pos_h = h_embed[:, :, None] / dim_t
pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2)
pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)
else:
raise ValueError("Unknown pos_tensor shape(-1):{}".format(pos_tensor.size(-1)))
return pos
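# For reference (derived from the code above): this is the DETR-style sinusoidal encoding.
# Each normalized coordinate p is scaled to 2*pi*p and divided by
# dim_t[i] = 10000 ** (2 * (i // 2) / 128); even channels then take sin() and odd channels
# take cos(), giving 128 features per coordinate (256 output dims for (y, x), 512 for (y, x, w, h)).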
# Path: GroundingDINO/groundingdino/models/GroundingDINO/utils.py
def get_sine_pos_embed(
pos_tensor: torch.Tensor,
num_pos_feats: int = 128,
temperature: int = 10000,
exchange_xy: bool = True,
):
"""generate sine position embedding from a position tensor
Args:
pos_tensor (torch.Tensor): shape: [..., n].
num_pos_feats (int): projected shape for each float in the tensor.
temperature (int): temperature in the sine/cosine function.
exchange_xy (bool, optional): exchange pos x and pos y. \
For example, input tensor is [x,y], the results will be [pos(y), pos(x)]. Defaults to True.
Returns:
pos_embed (torch.Tensor): shape: [..., n*num_pos_feats].
"""
scale = 2 * math.pi
dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device)
dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)
def sine_func(x: torch.Tensor):
sin_x = x * scale / dim_t
sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2)
return sin_x
pos_res = [sine_func(x) for x in pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1)]
if exchange_xy:
pos_res[0], pos_res[1] = pos_res[1], pos_res[0]
pos_res = torch.cat(pos_res, dim=-1)
return pos_res
# Path: GroundingDINO/groundingdino/models/GroundingDINO/transformer.py
from typing import Optional
from torch import Tensor, nn
from groundingdino.util.misc import inverse_sigmoid
from .fuse_modules import BiAttentionBlock
from .ms_deform_attn import MultiScaleDeformableAttention as MSDeformAttn
from .transformer_vanilla import TransformerEncoderLayer
from .utils import (
MLP,
_get_activation_fn,
_get_clones,
gen_encoder_output_proposals,
gen_sineembed_for_position,
get_sine_pos_embed,
)
import torch
import torch.utils.checkpoint as checkpoint
# ------------------------------------------------------------------------
# Grounding DINO
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# DINO
# Copyright (c) 2022 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Conditional DETR Transformer class.
# Copyright (c) 2021 Microsoft. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# ------------------------------------------------------------------------
class Transformer(nn.Module):
def __init__(
self,
d_model=256,
nhead=8,
num_queries=300,
num_encoder_layers=6,
num_unicoder_layers=0,
num_decoder_layers=6,
dim_feedforward=2048,
dropout=0.0,
activation="relu",
normalize_before=False,
return_intermediate_dec=False,
query_dim=4,
num_patterns=0,
# for deformable encoder
num_feature_levels=1,
enc_n_points=4,
dec_n_points=4,
# init query
learnable_tgt_init=False,
# two stage
two_stage_type="no", # ['no', 'standard', 'early', 'combine', 'enceachlayer', 'enclayer1']
embed_init_tgt=False,
# for text
| use_text_enhancer=False, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: HKU-BAL/ClairS-TO
# Path: clairs/call_variants.py
ACGT = 'ACGT'
AU, CU, GU, TU = acgt_count
FAU, FCU, FGU, FTU = int(input_list_forward_acgt_count_ori[0]), int(input_list_forward_acgt_count_ori[1]), int(input_list_forward_acgt_count_ori[2]), int(input_list_forward_acgt_count_ori[3])
RAU, RCU, RGU, RTU = int(input_list_reverse_acgt_count_ori[0]), int(input_list_reverse_acgt_count_ori[1]), int(input_list_reverse_acgt_count_ori[2]), int(input_list_reverse_acgt_count_ori[3])
AU, CU, GU, TU = int(input_list_acgt_count_ori[0]), int(input_list_acgt_count_ori[1]), int(input_list_acgt_count_ori[2]), int(input_list_acgt_count_ori[3])
AD = None
AD = str(tumor_supported_reads_count) if is_reference else str(tumor_ref_num) + ',' + str(
tumor_supported_reads_count)
def filtration_value_from(quality_score_for_pass, quality_score, is_reference=False, is_variant=False):
def quality_score_from(probability, int_format=False, use_phred_qual=True):
def argmax(l):
def decode_acgt_count(alt_dict, ref_base=None, tumor_coverage=None):
def output_vcf_from_probability(
chromosome,
position,
reference_base,
tumor_alt_info,
input_forward_acgt_count_ori,
input_reverse_acgt_count_ori,
probabilities_a,
probabilities_c,
probabilities_g,
probabilities_t,
probabilities_na,
probabilities_nc,
probabilities_ng,
probabilities_nt,
likelihood_data_info_list,
output_config=None,
vcf_writer=None,
):
def decode_alt_info(alt_info):
def rank_variant_alt(tumor_alt_info_dict, tumor_read_depth):
def decode_alt_info(alt_info_dict, read_depth):
def call_variants_from_probability(args):
def main():
# Path: shared/utils.py
BASIC_BASES = set("ACGTU")
WARNING = '\033[93m'
ERROR = '\033[91m'
ENDC = '\033[0m'
def log_error(log):
def log_warning(log):
def is_file_exists(file_name, suffix=""):
def is_folder_exists(folder_name, suffix=""):
def legal_range_from(param_name, x, min_num=None, max_num=None, exit_out_of_range=False):
def file_path_from(file_name, suffix="", exit_on_not_found=False, sep="", allow_none=False, is_directory=False):
def folder_path_from(folder_name, create_not_found=True, exit_on_not_found=False):
def is_command_exists(command):
def executable_command_string_from(command_to_execute, exit_on_not_found=False):
def subprocess_popen(args, stdin=None, stdout=PIPE, stderr=stderr, bufsize=8388608):
def str_none(v):
def str2bool(v):
def region_from(ctg_name, ctg_start=None, ctg_end=None):
def reference_sequence_from(samtools_execute_command, fasta_file_path, regions):
def vcf_candidates_from(vcf_fn, contig_name=None):
def candidate_position_generator_from(
candidate,
flanking_base_num,
begin_to_end
):
def samtools_mpileup_generator_from(
candidate,
flanking_base_num,
begin_to_end
):
def samtools_view_process_from(
ctg_name,
ctg_start,
ctg_end,
samtools,
bam_file_path
):
def __init__(self, ctg_name=None,
genotype1=None,
genotype2=None,
pos=None,
ref_base=None,
alt_base=None,
candidate=False,
cigar_count=None,
confident_variant=False,
depth=None,
alt_list=None,
af=None,
filter=None,
af_list=None,
alt_type_mapping_dict=None,
extra_infos="",
qual=None,
row_str=None):
def update_info(self, ref_base, alt_base, genotype, extra_infos=""):
def __init__(self, pos, ref_base, depth, af_list, alt_dict, tumor_alt_dict, extra_infos=""):
def __init__(self, handle):
def __del__(self):
class Position(object):
class AltInfos(object):
class TensorStdout(object):
# Path: clairs/predict.py
import sys
import os
import numpy as np
import logging
import torch
import shlex
import shared.param as param
from time import time
from argparse import ArgumentParser, SUPPRESS
from threading import Thread
from sys import stderr
from subprocess import PIPE, run, Popen
from clairs.call_variants import output_vcf_from_probability, OutputConfig
from shared.utils import IUPAC_base_to_ACGT_base_dict as BASE2ACGT, BASIC_BASES, str2bool, file_path_from, log_error, \
log_warning, subprocess_popen, TensorStdout
from shared.vcf import VcfWriter
""
)
def DataGenerator(dataset, num_epoch, batch_size, chunk_start_pos, chunk_end_pos):
for idx in range(num_epoch):
start_pos = chunk_start_pos + idx * batch_size
end_pos = min(chunk_start_pos + (idx + 1) * batch_size, chunk_end_pos)
input_matrix = dataset.input_matrix[start_pos:end_pos]
position = dataset.position[start_pos:end_pos] # .flatten()
tumor_alt_info_list = dataset.tumor_alt_info[start_pos:end_pos] # .flatten()
ref_center_list = dataset.ref_center[start_pos:end_pos]
yield input_matrix, position, tumor_alt_info_list, ref_center_list
def predict(args):
global output_config
global call_fn
output_config = OutputConfig(
is_show_reference=args.show_ref,
quality_score_for_pass=args.qual,
pileup=args.pileup,
enable_indel_calling=args.enable_indel_calling
)
param.flankingBaseNum = param.flankingBaseNum if args.flanking is None else args.flanking
param.no_of_positions = param.flankingBaseNum * 2 + 1
param.min_rescale_cov = param.min_rescale_cov if args.min_rescale_cov is None else args.min_rescale_cov
predict_fn = args.predict_fn
use_gpu = args.use_gpu
variant_call_start_time = time()
call_fn = args.call_fn
chkpnt_fn_acgt = args.chkpnt_fn_acgt
tensor_fn_acgt = args.tensor_fn_acgt
chkpnt_fn_nacgt = args.chkpnt_fn_nacgt
tensor_fn_nacgt = args.tensor_fn_nacgt
platform = args.platform
torch.set_num_threads(1)
torch.manual_seed(0)
np.random.seed(0)
if use_gpu and not torch.cuda.is_available():
print("[WARNING] --use_gpu is enabled, but cuda is not found")
use_gpu = False
if use_gpu:
device = 'cuda'
else:
os.environ["CUDA_VISIBLE_DEVICES"] = ""
device = 'cpu'
if call_fn is not None:
call_dir = os.path.dirname(call_fn)
if not os.path.exists(call_dir):
output = run("mkdir -p {}".format(call_dir), shell=True)
vcf_writer = VcfWriter(vcf_fn=args.call_fn,
ref_fn=args.ref_fn,
show_ref_calls=args.show_ref,
sample_name=args.sample_name,
)
output_file = vcf_writer
elif predict_fn != "PIPE":
predict_dir = os.path.dirname(predict_fn)
if not os.path.exists(predict_dir):
output = run("mkdir -p {}".format(predict_dir), shell=True)
predict_fn_fpo = open(predict_fn, "wb")
predict_fn_fp = subprocess_popen(shlex.split("{} -c".format(param.zstd)), stdin=PIPE, stdout=predict_fn_fpo)
output_file = predict_fn_fp.stdin
else:
predict_fn_fp = TensorStdout(sys.stdout)
output_file = predict_fn_fp.stdin
global test_pos
test_pos = None
model_p = torch.load(chkpnt_fn_acgt, map_location=torch.device(device))
model_acgt = model_p['model_acgt']
model_r = torch.load(chkpnt_fn_nacgt, map_location=torch.device(device))
model_nacgt = model_r['model_nacgt']
model_acgt.eval()
model_nacgt.eval()
total = 0
softmax = torch.nn.Softmax(dim=1)
if not args.is_from_tables:
is_finish_loaded_all_mini_batches = False
mini_batches_loaded_acgt = []
mini_batches_to_output_acgt = []
mini_batches_loaded_nacgt = []
mini_batches_to_output_nacgt = []
mini_batches_loaded_acgt_ori = []
mini_batches_to_output_acgt_ori = []
def load_mini_batch():
try:
mini_batches_loaded_acgt.append(next(tensor_generator_acgt))
mini_batches_loaded_nacgt.append(next(tensor_generator_nacgt))
mini_batches_loaded_acgt_ori.append(next(tensor_generator_acgt_ori))
except StopIteration:
return
tensor_generator_acgt = tensor_generator_from(tensor_file_path=tensor_fn_acgt,
batch_size=param.predictBatchSize,
pileup=args.pileup,
min_rescale_cov=param.min_rescale_cov,
platform=platform)
tensor_generator_nacgt = tensor_generator_from(tensor_file_path=tensor_fn_nacgt,
batch_size=param.predictBatchSize,
pileup=args.pileup,
min_rescale_cov=param.min_rescale_cov,
platform=platform)
tensor_generator_acgt_ori = tensor_generator_from(tensor_file_path=tensor_fn_acgt,
batch_size=param.predictBatchSize,
pileup=args.pileup,
min_rescale_cov=None,
platform=platform)
| while True: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: microsoft/folx
# Path: folx/api.py
T = TypeVar("T", bound=PyTree[Array])
R = TypeVar("R", bound=PyTree[Array])
JAC_DIM = 0 # should be either 0 or -1. TODO: switching is not support.
GENERAL = 0
LINEAR_IN_FIRST = 1
LINEAR_IN_ONE = 2 | LINEAR_IN_FIRST
LINEAR = 4 | LINEAR_IN_ONE
REDUCTION = 8
MULTIPLICATION = 16 | LINEAR_IN_ONE
DOT_PRODUCT = 32 | REDUCTION | MULTIPLICATION
INDEXING = 64 | LINEAR
SCATTER = 128
JOIN_JVP = 256
class FwdJacobian(NamedTuple):
class FwdLaplArray(NamedTuple):
class FwdLaplArgs(NamedTuple):
class MergeFn(Protocol):
class ForwardLaplacianFns(NamedTuple):
class JvpFn(Protocol):
class CustomTraceJacHessianJac(Protocol):
class ForwardLaplacian(Protocol):
class FunctionFlags(IntFlag):
def weak(self) -> bool:
def unique_idx(self):
def materialize_for_idx(self, idx, max_idx: int | None = None):
def aggregate(x, indices):
def get_index_mask(self, outputs):
def get_indices(mask, out_mask):
def data_shape(self):
def construct_jac_for(self, idx):
def dense_array(self) -> Array:
def max_n(self) -> int:
def as_dense(self):
def dense_or_sparse(self) -> Array:
def sparse(self) -> Array:
def mask(self) -> np.ndarray:
def ndim(self) -> int:
def from_dense(cls, array):
def __add__(self, other):
def astype(self, dtype):
def shape(self):
def ndim(self):
def dense_jacobian(self):
def is_jacobian_weak(self):
def sparse_jacobian(self):
def jacobian_mask(self):
def dense(self):
def astype(self, dtype):
def IS_LPL_ARR(x):
def IS_LEAF(x):
def x(self) -> Arrays:
def jacobian(self) -> tuple[FwdJacobian, ...]:
def dense_jacobian(self) -> Arrays:
def sparse_jacobian(self) -> Arrays:
def jacobian_mask(self):
def all_jacobian_weak(self) -> bool:
def any_jacobian_weak(self) -> bool:
def dense(self):
def laplacian(self) -> Arrays:
def one_hot_sparse_jacobian(self):
def __len__(self) -> int:
def __call__(self, args: Arrays, extra: ExtraArgs) -> Arrays:
def __call__(self, primals: Arrays, tangents: Arrays) -> tuple[Array, Array]:
def __call__(self, args: FwdLaplArgs, extra_args: ExtraArgs, merge: MergeFn, materialize_idx: Array) -> PyTree[Array]:
def __call__(self, *args: ArrayOrFwdLaplArray, sparsity_threshold: int , **kwargs) -> PyTree[ArrayOrFwdLaplArray]:
# Path: folx/utils.py
def extract_jacobian_mask(arrays: Sequence[ArrayOrFwdLaplArray]):
indices = []
for arr in arrays:
if isinstance(arr, FwdLaplArray):
indices.append(arr.jacobian.x0_idx)
def merge(arrs: ArrayOrFwdLaplArray):
idx_iter = iter(indices)
return [
arr._replace(jacobian=arr.jacobian._replace(x0_idx=next(idx_iter))) if isinstance(arr, FwdLaplArray) else arr
for arr in arrs
]
return merge
# Path: folx/utils.py
def ravel(pytree):
"""
An implementation of jax.flatten_util.ravel_pytree that does not
require the leaves to be jax.Array when unflattening.
"""
leaves, tree_def = jtu.tree_flatten(pytree)
shapes = [l.shape for l in leaves]
flat = jnp.concatenate([l.ravel() for l in leaves])
def unravel(arr):
unravelled = []
idx = 0
for shape in shapes:
size = np.prod(shape, dtype=int)
unravelled.append(arr[idx : idx + size].reshape(shape))
idx += size
return tree_def.unflatten(unravelled)
return flat, unravel
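# A minimal usage sketch for ravel() above (illustrative; the module path folx.utils is assumed
# from the file path quoted above): the flat vector concatenates all leaves, and unravel()
# restores the original pytree structure.
import jax.numpy as jnp
from folx.utils import ravel
params = {"w": jnp.ones((2, 3)), "b": jnp.zeros((3,))}
flat, unravel = ravel(params)
print(flat.shape)            # (9,)
restored = unravel(flat)
print(restored["w"].shape)   # (2, 3)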
# Path: folx/wrapped_functions.py
R = TypeVar("R", bound=PyTree[Array])
P = ParamSpec("P")
_LAPLACE_FN_REGISTRY: dict[Primitive | str, ForwardLaplacian] = {
jax.lax.dot_general_p: dot_general,
jax.lax.abs_p: wrap_forward_laplacian(jax.lax.abs, flags=FunctionFlags.LINEAR, in_axes=()),
jax.lax.neg_p: wrap_forward_laplacian(jax.lax.neg, flags=FunctionFlags.LINEAR, in_axes=()),
jax.lax.add_p: wrap_forward_laplacian(jax.lax.add, flags=FunctionFlags.LINEAR, in_axes=()),
jax.lax.sub_p: wrap_forward_laplacian(jax.lax.sub, flags=FunctionFlags.LINEAR, in_axes=()),
jax.lax.mul_p: wrap_forward_laplacian(jax.lax.mul, flags=FunctionFlags.MULTIPLICATION, in_axes=()),
jax.lax.div_p: wrap_forward_laplacian(
jax.lax.div, flags=FunctionFlags.LINEAR_IN_FIRST, in_axes=()
),
jax.lax.pow_p: wrap_forward_laplacian(jax.lax.pow, in_axes=()),
jax.lax.integer_pow_p: wrap_forward_laplacian(jax.lax.integer_pow, in_axes=()),
jax.lax.sign_p: warp_without_fwd_laplacian(jax.lax.sign),
jax.lax.reduce_sum_p: wrap_forward_laplacian(
jax.lax.reduce_sum_p.bind,
flags=FunctionFlags.REDUCTION | FunctionFlags.LINEAR,
name="reduce_sum",
),
jax.lax.reduce_max_p: wrap_forward_laplacian(
jax.lax.reduce_max_p.bind,
flags=FunctionFlags.REDUCTION | FunctionFlags.LINEAR,
name="reduce_max",
),
jax.lax.reduce_min_p: wrap_forward_laplacian(
jax.lax.reduce_min_p.bind,
flags=FunctionFlags.REDUCTION | FunctionFlags.LINEAR,
name="reduce_min",
),
jax.lax.reduce_prod_p: wrap_forward_laplacian(
jax.lax.reduce_prod_p.bind, flags=FunctionFlags.REDUCTION, name="reduce_prod"
),
jax.lax.cumsum_p: wrap_forward_laplacian(jax.lax.cumsum, flags=FunctionFlags.LINEAR),
jax.lax.sqrt_p: wrap_forward_laplacian(jax.lax.sqrt, in_axes=()),
jax.lax.rsqrt_p: wrap_forward_laplacian(jax.lax.rsqrt, in_axes=()),
jax.lax.log_p: wrap_forward_laplacian(jax.lax.log, in_axes=()),
jax.lax.log1p_p: wrap_forward_laplacian(jax.lax.log1p, in_axes=()),
jax.lax.exp_p: wrap_forward_laplacian(jax.lax.exp, in_axes=()),
jax.lax.expm1_p: wrap_forward_laplacian(jax.lax.expm1, in_axes=()),
jax.lax.tanh_p: wrap_forward_laplacian(jax.lax.tanh, in_axes=()),
jax.lax.logistic_p: wrap_forward_laplacian(jax.lax.logistic, in_axes=()),
jax.lax.acos_p: wrap_forward_laplacian(jax.lax.acos, in_axes=()),
jax.lax.asin_p: wrap_forward_laplacian(jax.lax.asin, in_axes=()),
jax.lax.atan_p: wrap_forward_laplacian(jax.lax.atan, in_axes=()),
jax.lax.atan2_p: wrap_forward_laplacian(jax.lax.atan2, in_axes=()),
jax.lax.cos_p: wrap_forward_laplacian(jax.lax.cos, in_axes=()),
jax.lax.sin_p: wrap_forward_laplacian(jax.lax.sin, in_axes=()),
jax.lax.tan_p: wrap_forward_laplacian(jax.lax.tan, in_axes=()),
jax.lax.broadcast_in_dim_p: wrap_forward_laplacian(
jax.lax.broadcast_in_dim, flags=FunctionFlags.INDEXING
),
jax.lax.reshape_p: wrap_forward_laplacian(jax.lax.reshape, flags=FunctionFlags.INDEXING),
jax.lax.slice_p: wrap_forward_laplacian(jax.lax.slice, flags=FunctionFlags.INDEXING),
jax.lax.dynamic_slice_p: wrap_forward_laplacian(
jax.lax.dynamic_slice_p.bind,
flags=FunctionFlags.INDEXING,
name="slice",
index_static_args=slice(1, None),
),
jax.lax.concatenate_p: wrap_forward_laplacian(
jax.lax.concatenate_p.bind,
flags=FunctionFlags.INDEXING,
name="concatenate",
index_static_args=(),
),
jax.lax.select_n_p: wrap_forward_laplacian(
jax.lax.select_n, flags=FunctionFlags.INDEXING, index_static_args=(0,)
),
jax.lax.gather_p: wrap_forward_laplacian(
jax.lax.gather_p.bind, flags=FunctionFlags.INDEXING, name="gather"
),
jax.lax.transpose_p: wrap_forward_laplacian(jax.lax.transpose, flags=FunctionFlags.INDEXING),
jax.lax.squeeze_p: wrap_forward_laplacian(jax.lax.squeeze, flags=FunctionFlags.INDEXING),
jax.lax.rev_p: wrap_forward_laplacian(jax.lax.rev, flags=FunctionFlags.INDEXING),
jax.lax.max_p: wrap_forward_laplacian(jax.lax.max, in_axes=(), flags=FunctionFlags.LINEAR),
jax.lax.min_p: wrap_forward_laplacian(jax.lax.min, in_axes=(), flags=FunctionFlags.LINEAR),
jax.lax.scatter_p: wrap_forward_laplacian(
jax.lax.scatter_p.bind, flags=FunctionFlags.INDEXING | FunctionFlags.SCATTER, name="scatter"
),
jax.lax.scatter_add_p: wrap_forward_laplacian(
jax.lax.scatter_add_p.bind,
flags=FunctionFlags.LINEAR | FunctionFlags.SCATTER,
name="scatter_add",
),
jax.lax.stop_gradient_p: warp_without_fwd_laplacian(jax.lax.stop_gradient),
jax.lax.eq_p: warp_without_fwd_laplacian(jax.lax.eq),
jax.lax.lt_p: warp_without_fwd_laplacian(jax.lax.lt),
jax.lax.le_p: warp_without_fwd_laplacian(jax.lax.le),
jax.lax.gt_p: warp_without_fwd_laplacian(jax.lax.gt),
jax.lax.ge_p: warp_without_fwd_laplacian(jax.lax.ge),
jax.lax.ne_p: warp_without_fwd_laplacian(jax.lax.ne),
jax.lax.xor_p: warp_without_fwd_laplacian(jax.lax.bitwise_xor),
jax.lax.not_p: warp_without_fwd_laplacian(jax.lax.bitwise_not),
jax.lax.and_p: warp_without_fwd_laplacian(jax.lax.bitwise_and),
jax.lax.or_p: warp_without_fwd_laplacian(jax.lax.bitwise_or),
jax.lax.is_finite_p: warp_without_fwd_laplacian(jax.lax.is_finite),
jax.lax.convert_element_type_p: dtype_conversion,
"sign": warp_without_fwd_laplacian(jax.lax.sign),
"logaddexp": wrap_forward_laplacian(jnp.logaddexp, in_axes=()),
"sigmoid": wrap_forward_laplacian(jax.nn.sigmoid, in_axes=()),
"softplus": wrap_forward_laplacian(jax.nn.softplus, in_axes=()),
"silu": wrap_forward_laplacian(jax.nn.silu, in_axes=()),
"slogdet": slogdet_wrapper,
}
def rearrange(x, contract_dims, batch_dims, brdcast_dims, other_brdcast_dims, rhs=False):
def dot_general(
lhs: ArrayOrFwdLaplArray,
rhs: ArrayOrFwdLaplArray,
*_: ArrayOrFwdLaplArray,
dimension_numbers: tuple[
tuple[tuple[int, ...], tuple[int, ...]], tuple[tuple[int, ...], tuple[int, ...]]
],
precision=None,
preferred_element_type=None,
sparsity_threshold: int = 0,
**__,
) -> ArrayOrFwdLaplArray:
def dot_last(lhs: Array, rhs: Array) -> Array:
def dtype_conversion(arr: ArrayOrFwdLaplArray, *_: ArrayOrFwdLaplArray, new_dtype: DTypeLike, sparsity_threshold: int, **kwargs):
def slogdet(x):
def slogdet_jvp(primals, tangents):
def custom_jvp(jacobian, tangent):
def slogdet_wrapper(x: ArrayOrFwdLaplArray, *_: ArrayOrFwdLaplArray, sparsity_threshold: int, **__):
def register_function(
primitive_or_name: Primitive | str,
laplacian: ForwardLaplacian
):
def deregister_function(primitive_or_name: Primitive | str):
def is_registered(primitive_or_name: Primitive | str) -> bool:
def get_laplacian(primitive_or_name: Primitive, wrap_if_missing: Literal[True]) -> ForwardLaplacian:
def get_laplacian(primitive_or_name: Primitive | str, wrap_if_missing: Literal[False] = False) -> ForwardLaplacian | None:
def get_laplacian(primitive_or_name: Primitive | str, wrap_if_missing: bool = False) -> ForwardLaplacian | None:
# Path: folx/interpreter.py
import functools
import logging
import jax
import jax.numpy as jnp
import jax.tree_util as jtu
import numpy as np
from collections import defaultdict
from typing import Callable, ParamSpec, Sequence, TypeVar
from jax import core
from jax.util import safe_map
from .api import Array, ArrayOrFwdLaplArray, FwdJacobian, FwdLaplArray, PyTree
from .utils import extract_jacobian_mask, ravel
from .wrapped_functions import get_laplacian, wrap_forward_laplacian
R = TypeVar("R", bound=PyTree[Array])
P = ParamSpec("P")
class JaxExprEnvironment:
# A simple environment that keeps track of the variables
# and frees them once they are no longer needed.
env: dict[core.Var, ArrayOrFwdLaplArray]
reference_counter: dict[core.Var, int]
def __init__(self, jaxpr: core.Jaxpr, consts: Sequence[Array], *args: ArrayOrFwdLaplArray):
self.env = {}
self.reference_counter = defaultdict(int)
for v in jaxpr.invars + jaxpr.constvars:
if isinstance(v, core.Literal):
continue
self.reference_counter[v] += 1
eqn: core.JaxprEqn
for eqn in jaxpr.eqns:
for v in eqn.invars:
if isinstance(v, core.Literal):
continue
self.reference_counter[v] += 1
for v in jaxpr.outvars:
if isinstance(v, core.Literal):
continue
self.reference_counter[v] = np.iinfo(np.int32).max
self.write_many(jaxpr.constvars, consts)
self.write_many(jaxpr.invars, args)
def read(self, var: core.Atom) -> ArrayOrFwdLaplArray:
if isinstance(var, core.Literal):
return var.val
self.reference_counter[var] -= 1
result = self.env[var]
if self.reference_counter[var] == 0:
del self.env[var]
del self.reference_counter[var]
return result
def write(self, var: core.Var, val: ArrayOrFwdLaplArray):
if self.reference_counter[var] > 0:
self.env[var] = val
def read_many(self, vars: Sequence[core.Atom]) -> list[ArrayOrFwdLaplArray]:
return safe_map(self.read, vars)
def write_many(self, vars: Sequence[core.Var], vals: Sequence[ArrayOrFwdLaplArray]):
return safe_map(self.write, vars, vals)
def eval_jaxpr_with_forward_laplacian(jaxpr: core.Jaxpr, consts, *args, sparsity_threshold: int):
enable_sparsity = sparsity_threshold > 0
env = JaxExprEnvironment(jaxpr, consts, *args)
def eval_scan(eqn: core.JaxprEqn, invals):
n_carry, n_const = eqn.params["num_carry"], eqn.params["num_consts"]
in_const, in_carry, in_inp = invals[:n_const], invals[n_const:n_carry+n_const], invals[n_const+n_carry:]
carry_merge = extract_jacobian_mask(in_carry)
assert all(isinstance(x, Array) for x in in_inp), "Scan does not support scanning over input dependent tensors.\nPlease unroll the loop."
def wrapped(carry, x):
result = eval_jaxpr_with_forward_laplacian(
eqn.params['jaxpr'].jaxpr,
(),
*in_const,
*carry_merge(carry),
*x,
sparsity_threshold=sparsity_threshold
)
return result[:n_carry], result[n_carry:]
first_carry, first_y = wrapped(in_carry, jtu.tree_map(lambda x: x[0], in_inp))
# Check whether jacobian sparsity matches
| for a, b in zip(in_carry, first_carry): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: shuttworth/NICE-SLAM-Easyread
# Path: src/config.py
def load_config(path, default_path=None):
def update_recursive(dict1, dict2):
def get_model(cfg, nice=True):
# Path: src/NICE_SLAM.py
class NICE_SLAM():
"""
NICE_SLAM main class.
Mainly allocates shared resources and dispatches the mapping and tracking processes.
"""
def __init__(self, cfg, args):
# Initialize configuration and arguments
self.cfg = cfg
self.args = args
self.nice = args.nice
# Read the various settings from the config
self.coarse = cfg['coarse']
self.occupancy = cfg['occupancy']
self.low_gpu_mem = cfg['low_gpu_mem']
self.verbose = cfg['verbose']
self.dataset = cfg['dataset']
self.coarse_bound_enlarge = cfg['model']['coarse_bound_enlarge']
# Set up the output directory
if args.output is None:
self.output = cfg['data']['output']
else:
self.output = args.output
self.ckptsdir = os.path.join(self.output, 'ckpts')
os.makedirs(self.output, exist_ok=True)
os.makedirs(self.ckptsdir, exist_ok=True)
os.makedirs(f'{self.output}/mesh', exist_ok=True)
# Read the camera configuration
self.H, self.W, self.fx, self.fy, self.cx, self.cy = cfg['cam']['H'], cfg['cam'][
'W'], cfg['cam']['fx'], cfg['cam']['fy'], cfg['cam']['cx'], cfg['cam']['cy']
self.update_cam()
# Initialize the model
model = config.get_model(cfg, nice=self.nice)
self.shared_decoders = model
# Load the remaining configuration
self.scale = cfg['scale']
self.load_bound(cfg)
if self.nice:
self.load_pretrain(cfg)
self.grid_init(cfg)
else:
self.shared_c = {}
# need to use spawn
# Set the multiprocessing start method
try:
mp.set_start_method('spawn', force=True)
except RuntimeError:
pass
# Initialize the frame reader
self.frame_reader = get_dataset(cfg, args, self.scale)
self.n_img = len(self.frame_reader)
# Initialize the list of estimated camera poses
self.estimate_c2w_list = torch.zeros((self.n_img, 4, 4))
self.estimate_c2w_list.share_memory_()
# Initialize the list of ground-truth camera poses
self.gt_c2w_list = torch.zeros((self.n_img, 4, 4))
self.gt_c2w_list.share_memory_()
# Initialize the remaining shared-memory variables
self.idx = torch.zeros((1)).int()
self.idx.share_memory_()
self.mapping_first_frame = torch.zeros((1)).int()
self.mapping_first_frame.share_memory_()
# the id of the newest frame Mapper is processing
self.mapping_idx = torch.zeros((1)).int()
self.mapping_idx.share_memory_()
self.mapping_cnt = torch.zeros((1)).int() # counter for mapping
self.mapping_cnt.share_memory_()
# Move the shared variables to the target device and share their memory
for key, val in self.shared_c.items():
val = val.to(self.cfg['mapping']['device'])
val.share_memory_()
self.shared_c[key] = val
# Initialize the renderer, mesher and logger
self.shared_decoders = self.shared_decoders.to(
self.cfg['mapping']['device'])
self.shared_decoders.share_memory()
self.renderer = Renderer(cfg, args, self)
self.mesher = Mesher(cfg, args, self)
self.logger = Logger(cfg, args, self)
self.mapper = Mapper(cfg, args, self, coarse_mapper=False)
# Initialize the mappers and the tracker
if self.coarse:
self.coarse_mapper = Mapper(cfg, args, self, coarse_mapper=True)
self.tracker = Tracker(cfg, args, self)
# Print the output description
self.print_output_desc()
def print_output_desc(self):
# Prints the output info; called from __init__ above
print(f"INFO: The output folder is {self.output}")
if 'Demo' in self.output:
print(
f"INFO: The GT, generated and residual depth/color images can be found under " +
f"{self.output}/vis/")
else:
print(
f"INFO: The GT, generated and residual depth/color images can be found under " +
f"{self.output}/tracking_vis/ and {self.output}/mapping_vis/")
print(f"INFO: The mesh can be found under {self.output}/mesh/")
print(f"INFO: The checkpoint can be found under {self.output}/ckpt/")
# Updates the camera intrinsics according to the pre-processing config, which may include resizing the image or cropping its edges; called from __init__
def update_cam(self):
"""
Update the camera intrinsics according to pre-processing config,
such as resize or edge crop.
"""
# resize the input images to crop_size (variable name used in lietorch)
# Check whether the config has a crop_size parameter. If so, adjust the camera focal lengths and principal point to fit the new image size
# sx and sy are the width and height scaling ratios used to rescale the focal lengths (fx, fy) and the principal point (cx, cy); finally, the image width (W) and height (H) are updated to the new crop size
if 'crop_size' in self.cfg['cam']:
crop_size = self.cfg['cam']['crop_size']
sx = crop_size[1] / self.W
sy = crop_size[0] / self.H
self.fx = sx*self.fx
self.fy = sy*self.fy
self.cx = sx*self.cx
self.cy = sy*self.cy
self.W = crop_size[1]
self.H = crop_size[0]
# cropping will change H, W, cx, cy, so need to change here
# Check whether the config has a crop_edge parameter for cropping image borders. If crop_edge is greater than 0 (it is 0 in nice_slam.yaml), subtract twice the crop_edge value from the image width and height and shift the principal point accordingly
if self.cfg['cam']['crop_edge'] > 0:
self.H -= self.cfg['cam']['crop_edge']*2
self.W -= self.cfg['cam']['crop_edge']*2
self.cx -= self.cfg['cam']['crop_edge']
self.cy -= self.cfg['cam']['crop_edge']
# Loads and sets the scene bound parameters; called from __init__
def load_bound(self, cfg):
"""
Pass the scene bound parameters to different decoders and self.
Args:
cfg (dict): parsed config dict.
"""
# scale the bound if there is a global scaling factor
# Read the bound parameters from the config and convert them to a PyTorch tensor. The bounds are multiplied by a global scaling factor self.scale (1 in nice_slam.yaml) to adjust the scene size
self.bound = torch.from_numpy(
np.array(cfg['mapping']['bound'])*self.scale)
bound_divisible = cfg['grid_len']['bound_divisible']
# enlarge the bound a bit to allow it divisible by bound_divisible
# Adjust the upper bound so that the extent is divisible by bound_divisible
self.bound[:, 1] = (((self.bound[:, 1]-self.bound[:, 0]) /
bound_divisible).int()+1)*bound_divisible+self.bound[:, 0]
# If running the NICE-SLAM algorithm
if self.nice:
self.shared_decoders.bound = self.bound
self.shared_decoders.middle_decoder.bound = self.bound
self.shared_decoders.fine_decoder.bound = self.bound
self.shared_decoders.color_decoder.bound = self.bound
# If the coarse scene representation is enabled, multiply by an extra enlargement factor self.coarse_bound_enlarge, since the coarse decoder has to cover a larger scene range
if self.coarse:
self.shared_decoders.coarse_decoder.bound = self.bound*self.coarse_bound_enlarge
# Loads the pretrained ConvONet parameters; called from __init__
# ConvONet paper: https://arxiv.org/pdf/2003.04618.pdf
def load_pretrain(self, cfg):
"""
Load parameters of pretrained ConvOnet checkpoints to the decoders.
Args:
cfg (dict): parsed config dict
"""
if self.coarse:
# Load the coarse weights into ckpt (from pretrained_decoders in the yaml)
ckpt = torch.load(cfg['pretrained_decoders']['coarse'],
map_location=cfg['mapping']['device'])
# Initialize an empty dict to store the adapted weights
coarse_dict = {}
# Iterate over the model weights, keeping only decoder weights and excluding encoder weights
for key, val in ckpt['model'].items():
if ('decoder' in key) and ('encoder' not in key):
key = key[8:]
coarse_dict[key] = val
# Load the weights into the decoder
self.shared_decoders.coarse_decoder.load_state_dict(coarse_dict)
# Load the middle_fine weights into ckpt (from pretrained_decoders in the yaml)
ckpt = torch.load(cfg['pretrained_decoders']['middle_fine'],
map_location=cfg['mapping']['device'])
middle_dict = {}
fine_dict = {}
for key, val in ckpt['model'].items():
if ('decoder' in key) and ('encoder' not in key):
if 'coarse' in key:
key = key[8+7:]
middle_dict[key] = val
elif 'fine' in key:
key = key[8+5:]
fine_dict[key] = val
self.shared_decoders.middle_decoder.load_state_dict(middle_dict)
self.shared_decoders.fine_decoder.load_state_dict(fine_dict)
# Hierarchical feature grid initialization
def grid_init(self, cfg):
"""
Initialize the hierarchical feature grids.
Args:
cfg (dict): parsed config dict.
"""
# The grid_len parameters are set by the values in the yaml
if self.coarse:
coarse_grid_len = cfg['grid_len']['coarse']
self.coarse_grid_len = coarse_grid_len
middle_grid_len = cfg['grid_len']['middle']
self.middle_grid_len = middle_grid_len
fine_grid_len = cfg['grid_len']['fine']
self.fine_grid_len = fine_grid_len
color_grid_len = cfg['grid_len']['color']
self.color_grid_len = color_grid_len
c = {}
# Feature vector dimension c_dim and scene bound extent xyz_len
c_dim = cfg['model']['c_dim']
xyz_len = self.bound[:, 1]-self.bound[:, 0]
# If you have questions regarding the swap of axis 0 and 2,
# please refer to https://github.com/cvg/nice-slam/issues/24
if self.coarse:
coarse_key = 'grid_coarse'
coarse_val_shape = list(
map(int, (xyz_len*self.coarse_bound_enlarge/coarse_grid_len).tolist()))
coarse_val_shape[0], coarse_val_shape[2] = coarse_val_shape[2], coarse_val_shape[0]
self.coarse_val_shape = coarse_val_shape
val_shape = [1, c_dim, *coarse_val_shape]
# Initialize a zero tensor of the required shape and fill it from a standard normal distribution (mid, fine and color grids are handled the same way); standard normal initialization is a common weight-initialization method in deep learning and helps training and convergence
coarse_val = torch.zeros(val_shape).normal_(mean=0, std=0.01)
c[coarse_key] = coarse_val
middle_key = 'grid_middle'
middle_val_shape = list(map(int, (xyz_len/middle_grid_len).tolist()))
middle_val_shape[0], middle_val_shape[2] = middle_val_shape[2], middle_val_shape[0]
self.middle_val_shape = middle_val_shape
val_shape = [1, c_dim, *middle_val_shape]
middle_val = torch.zeros(val_shape).normal_(mean=0, std=0.01)
c[middle_key] = middle_val
fine_key = 'grid_fine'
fine_val_shape = list(map(int, (xyz_len/fine_grid_len).tolist()))
fine_val_shape[0], fine_val_shape[2] = fine_val_shape[2], fine_val_shape[0]
self.fine_val_shape = fine_val_shape
val_shape = [1, c_dim, *fine_val_shape]
fine_val = torch.zeros(val_shape).normal_(mean=0, std=0.0001) # the fine grid is initialized with a smaller standard deviation
c[fine_key] = fine_val
color_key = 'grid_color'
color_val_shape = list(map(int, (xyz_len/color_grid_len).tolist()))
color_val_shape[0], color_val_shape[2] = color_val_shape[2], color_val_shape[0]
self.color_val_shape = color_val_shape
val_shape = [1, c_dim, *color_val_shape]
color_val = torch.zeros(val_shape).normal_(mean=0, std=0.01)
c[color_key] = color_val
# All the initialized grids (coarse, middle, fine, color) are stored in a dict c, each under its own key (e.g. 'grid_coarse', 'grid_middle', etc.)
# The dict is then assigned to self.shared_c so the grids can be shared and accessed by the other methods of the class
self.shared_c = c
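# A worked example of the shape arithmetic above, with made-up numbers: if one bound axis spans
# xyz_len = 8.0 and middle_grid_len = 0.25, the middle grid gets int(8.0 / 0.25) = 32 voxels
# along that axis, and the stored tensor has shape [1, c_dim, z, y, x] because axes 0 and 2 of
# the voxel shape are swapped before allocation.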
def tracking(self, rank):
"""
Tracking Thread.
Args:
rank (int): Thread ID.
"""
# should wait until the mapping of first frame is finished
# Tracking can only start once initialization has fixed the world coordinate frame;
# NeRF-based SLAM systems such as NICE-SLAM initialize by taking the camera pose of the first frame as the world origin, mapping first and then tracking;
# during Tracking only the camera pose is optimized, not the hierarchical scene representation
while (1):
if self.mapping_first_frame[0] == 1:
break
time.sleep(1)
self.tracker.run()
def mapping(self, rank):
"""
Mapping Thread. (updates middle, fine, and color level)
Args:
rank (int): Thread ID.
"""
self.mapper.run()
def coarse_mapping(self, rank):
"""
Coarse mapping Thread. (updates coarse level)
Args:
rank (int): Thread ID.
"""
self.coarse_mapper.run()
def run(self):
"""
Dispatch Threads.
"""
processes = []
for rank in range(3):
# When rank is 0, create a tracking process; when rank is 1, a mapping process; when rank is 2, check self.coarse and, if enabled, run the coarse_mapping thread
if rank == 0:
p = mp.Process(target=self.tracking, args=(rank, ))
elif rank == 1:
p = mp.Process(target=self.mapping, args=(rank, ))
elif rank == 2:
if self.coarse:
p = mp.Process(target=self.coarse_mapping, args=(rank, ))
else:
continue
p.start()
processes.append(p)
for p in processes:
p.join()
# Path: run.py
import argparse
import random
import numpy as np
import torch
from src import config
from src.NICE_SLAM import NICE_SLAM
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def main():
# setup_seed(20)
parser = argparse.ArgumentParser(
description='Arguments for running the NICE-SLAM/iMAP*.'
)
parser.add_argument('config', type=str, help='Path to config file.')
parser.add_argument('--input_folder', type=str,
help='input folder, this have higher priority, can overwrite the one in config file')
parser.add_argument('--output', type=str,
| help='output folder, this have higher priority, can overwrite the one in config file') |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: sb-ai-lab/HypEx
# Path: hypex/matcher.py
class Matcher:
"""Class for compile full pipeline of Matching in Causal Inference task.
Matcher steps:
- Read, analyze data
- Feature selection via LightAutoML
- Converting a dataset with features to another space via Cholesky decomposition
In the new space, the distance L2 becomes equivalent to the Mahalanobis distance.
This allows us to use faiss to search for nearest objects, which can search only by L2 metric,
but without violating the methodology of matching,
for which it is important to count by the Mahalanobis distance
- Finding the nearest neighbors for each unit (with duplicates) using faiss.
For each of the control group, neighbors from the target group are matched and vice versa.
- Calculation bias
- Creating matched df (Wide df with pairs)
- Calculation metrics: ATE, ATT, ATC, p-value, and confidence intervals
- Calculation quality: PS-test, KS test, SMD test
- Returns metrics as dataframe, quality results as dict of df's and df_matched
- After receiving the result, the result should be validated using :func:`~hypex.matcher.Matcher.validate_result`
Example:
Common usecase - base pipeline for matching
>>> # Base info
>>> treatment = "treatment" # Column name with info about 'treatment' 0 or 1
>>> target = "target" # Column name with target
>>>
>>> # Optional
>>> info_col = ["user_id", 'address'] # Columns that will not participate in the match and are informative.
>>> group_col = "CatCol" # Column name for strict comparison (for a categorical feature)
>>>
>>> # Matching
>>> model = Matcher(data, outcome=target, treatment=treatment, info_col=info_col, group_col=group_col)
>>> features = model.lama_feature_select() # Feature selection via lama
>>> results, quality, df_matched = model.estimate(features=some_features) # Performs matching
>>>
>>> model.validate_result()
"""
def __init__(
self,
input_data: pd.DataFrame,
treatment: str,
outcome: Union[str, list] = None,
outcome_type: str = "numeric",
group_col: str = None,
info_col: list = None,
weights: dict = None,
base_filtration: bool = False,
generate_report: bool = GENERATE_REPORT,
report_feat_select_dir: str = REPORT_FEAT_SELECT_DIR,
timeout: int = TIMEOUT,
n_threads: int = N_THREADS,
n_folds: int = N_FOLDS,
verbose: bool = VERBOSE,
use_algos: list = None,
same_target_threshold: float = SAME_TARGET_THRESHOLD,
interquartile_coeff: float = OUT_INTER_COEFF,
drop_outliers_by_percentile: bool = OUT_MODE_PERCENT,
min_percentile: float = OUT_MIN_PERCENT,
max_percentile: float = OUT_MAX_PERCENT,
n_neighbors: int = 1,
silent: bool = True,
pbar: bool = True,
):
"""Initialize the Matcher object.
Args:
input_data:
Input dataframe
outcome:
Target column
treatment:
Column determine control and test groups
outcome_type:
Values type of target column. Defaults to "numeric"
group_col:
Column for grouping. Defaults to None.
info_col:
Columns with id, date or metadata, not taking part in calculations. Defaults to None
weights:
weights for numeric columns in order to increase matching quality by weighted feature.
By default, is None (all features have the same weight equal to 1). Example: {'feature_1': 10}
base_filtration:
                Whether to apply base filtration of features in order to remove columns that are constant or almost constant, bool.
Default is False.
generate_report:
Flag to create report. Defaults to True
report_feat_select_dir:
Folder for report files. Defaults to "report_feature_selector"
timeout:
Limit work time of code LAMA. Defaults to 600
n_threads:
Maximum number of threads. Defaults to 1
n_folds:
Number of folds for cross-validation. Defaults to 4
verbose:
Flag to show process stages. Defaults to 2
use_algos:
List of names of LAMA algorithms for feature selection. Defaults to ["lgb"]
same_target_threshold:
Threshold for correlation coefficient filter (Spearman). Default to 0.7
interquartile_coeff:
Percent for drop outliers. Default to 1.5
drop_outliers_by_percentile:
Flag to drop outliers by custom percentiles. Defaults to True
min_percentile:
Minimum percentile to drop outliers. Defaults to 0.02
max_percentile:
Maximum percentile to drop outliers. Defaults to 0.98
n_neighbors:
                Number of neighbors to match (in fact you may see more than n matches, as every match may have more than
                one neighbor with the same distance). Default value is 1.
silent:
Write logs in debug mode
pbar:
Display progress bar while get index
"""
if use_algos is None:
use_algos = USE_ALGOS
self.input_data = input_data
if outcome is None:
outcome = list()
self.outcomes = outcome if type(outcome) == list else [outcome]
self.treatment = treatment
self.group_col = group_col
self.info_col = info_col
self.outcome_type = outcome_type
self.weights = weights
self.generate_report = generate_report
self.report_feat_select_dir = report_feat_select_dir
self.timeout = timeout
self.n_threads = n_threads
self.n_folds = n_folds
self.verbose = verbose
self.use_algos = use_algos
self.same_target_threshold = same_target_threshold
self.interquartile_coeff = interquartile_coeff
self.mode_percentile = drop_outliers_by_percentile
self.min_percentile = min_percentile
self.max_percentile = max_percentile
self.base_filtration = base_filtration
self.features_importance = None
self.matcher = None
self.val_dict = None
self.pval_dict = None
self.new_treatment = None
self.validate = None
self.dropped_features = []
self.n_neighbors = n_neighbors
self.silent = silent
self.pbar = pbar
self._preprocessing_data()
def _convert_categorical_to_dummy(self):
"""Converts categorical variables to dummy variables.
Returns:
Data with categorical variables converted to dummy variables.
"""
info_col = self.info_col if self.info_col is not None else []
group_col = [self.group_col] if self.group_col is not None else []
columns_to_drop = info_col + group_col
if columns_to_drop is not None:
data = self.input_data.drop(columns=columns_to_drop)
else:
data = self.input_data
dummy_data = pd.get_dummies(data, drop_first=True, dtype=np.uint8)
return dummy_data
def _preprocessing_data(self):
"""Converts categorical features into dummy variables."""
info_col = self.info_col if self.info_col is not None else []
group_col = [self.group_col] if self.group_col is not None else []
columns_to_drop = info_col + group_col + self.outcomes + [self.treatment]
if self.base_filtration:
filtered_features = nan_filtration(self.input_data.drop(columns=columns_to_drop))
self.dropped_features = [f for f in self.input_data.columns if f not in filtered_features + columns_to_drop]
self.input_data = self.input_data[filtered_features + columns_to_drop]
nan_counts = self.input_data.isna().sum().sum()
if nan_counts != 0:
self._log(f"Number of NaN values filled with zeros: {nan_counts}", silent=False)
self.input_data = self.input_data.fillna(0)
if self.group_col is not None:
group_col = self.input_data[[self.group_col]]
if self.info_col is not None:
info_col = self.input_data[self.info_col]
self.input_data = self._convert_categorical_to_dummy()
if self.group_col is not None:
self.input_data = pd.concat([self.input_data, group_col], axis=1)
if self.info_col is not None:
self.input_data = pd.concat([self.input_data, info_col], axis=1)
if self.base_filtration:
filtered_features = const_filtration(self.input_data.drop(columns=columns_to_drop))
self.dropped_features = np.concatenate(
(
self.dropped_features,
[f for f in self.input_data.columns if f not in filtered_features + columns_to_drop],
)
)
self.input_data = self.input_data[filtered_features + columns_to_drop]
self._log("Categorical features turned into dummy")
def _apply_filter(self, filter_class, *filter_args):
"""Applies a filter to the input data.
Args:
filter_class:
The class of the filter to apply.
*filter_args:
Arguments to pass to the filter class.
"""
filter_instance = filter_class(*filter_args)
self.input_data = filter_instance.perform_filter(self.input_data)
def _spearman_filter(self):
"""Applies a filter by dropping columns correlated with the outcome column.
This method uses the Spearman filter to eliminate features from the dataset
that are highly correlated with the outcome columns, based on a pre-set threshold
"""
self._log("Applying filter by spearman test - drop columns correlated with outcome")
self._apply_filter(SpearmanFilter, self.outcomes[0], self.treatment, self.same_target_threshold)
def outliers_filter(self):
"""Removes outlier values from the dataset.
This method employs an OutliersFilter. If `drop_outliers_by_percentile` is True,
        it retains only the values between the min and max percentiles.
        If `drop_outliers_by_percentile` is False, it retains only the values between the 2nd and 98th percentiles.
"""
self._log(
f"Applying filter of outliers\n"
f"interquartile_coeff={self.interquartile_coeff}\n"
f"mode_percentile={self.mode_percentile}\n"
f"min_percentile={self.min_percentile}\n"
f"max_percentile={self.max_percentile}"
)
self._apply_filter(
OutliersFilter, self.interquartile_coeff, self.mode_percentile, self.min_percentile, self.max_percentile
)
def match_no_rep(self, threshold: float = 0.1, approximate_match: bool = False) -> pd.DataFrame:
"""Matching groups with no replacement.
It's done by optimizing the linear sum of
distances between pairs of treatment and control samples.
Args:
            threshold: caliper for the minimum deviation between test and control groups; used only when weights is not None.
approximate_match: use or not approximate matching
Returns:
Matched dataframe with no replacements.
"""
a = self.input_data[self.treatment]
X = self.input_data.drop(columns=self.treatment)
if self.info_col is not None:
X = X.drop(columns=self.info_col)
index_matched = MatcherNoReplacement(X, a, self.weights, approximate_match).match()
filtred_matches = index_matched.loc[1].iloc[self.input_data[a == 1].index].matches[index_matched.loc[1].iloc[self.input_data[a == 1].index].matches.apply(lambda x: x != [])]
if self.weights is not None:
weighted_features = [f for f in self.weights.keys()]
index_dict = dict()
for w in weighted_features:
source = self.input_data.loc[np.concatenate(filtred_matches.values)][w].values
target = self.input_data.loc[filtred_matches.index.to_list()][w].values
index = abs(source - target) <= abs(source) * threshold
index_dict.update({w: index})
index_filtered = sum(index_dict.values()) == len(self.weights)
matched_data = pd.concat(
[self.input_data.loc[filtred_matches.index.to_list()].iloc[index_filtered],
self.input_data.loc[np.concatenate(filtred_matches.values)].iloc[index_filtered]]
)
else:
matched_data = pd.concat([self.input_data.loc[filtred_matches.index.to_list()],
self.input_data.loc[np.concatenate(filtred_matches.values)]])
return matched_data
def lama_feature_select(self) -> pd.DataFrame:
"""Calculates the importance of each feature.
This method use LamaFeatureSelector to rank the importance of each feature in the dataset
The features are then sorted by their importance with the most important feature first
Returns:
The feature importances, sorted in descending order
"""
self._log("Counting feature importance")
feat_select = FeatureSelector(
outcome=self.outcomes[0],
outcome_type=self.outcome_type,
treatment=self.treatment,
timeout=self.timeout,
n_threads=self.n_threads,
n_folds=self.n_folds,
verbose=self.verbose,
generate_report=self.generate_report,
report_dir=self.report_feat_select_dir,
use_algos=self.use_algos,
)
df = self.input_data if self.group_col is None else self.input_data.drop(columns=self.group_col)
if self.info_col is not None:
df = df.drop(columns=self.info_col)
features = feat_select.perform_selection(df=df)
if self.group_col is None:
self.features_importance = features
else:
self.features_importance = features.append(
{"Feature": self.group_col, "Importance": features.Importance.max()}, ignore_index=True
)
return self.features_importance.sort_values("Importance", ascending=False)
def _create_faiss_matcher(self, df=None, validation=None):
"""Creates a FaissMatcher object.
Args:
df:
The dataframe to use. If None, uses self.input_data.
validation:
Whether to use the matcher for validation. If None, determines based on whether
"""
if df is None:
df = self.input_data
self.matcher = FaissMatcher(
df,
self.outcomes,
self.treatment,
info_col=self.info_col,
weights=self.weights,
features=self.features_importance,
group_col=self.group_col,
validation=validation,
n_neighbors=self.n_neighbors,
pbar=False if validation else self.pbar,
)
def _perform_validation(self):
"""Performs validation using the FaissMatcher."""
if self.group_col is None:
sim = self.matcher.match()
else:
sim = self.matcher.group_match()
for key in self.val_dict.keys():
self.val_dict[key].append(sim[key][0])
def _log(self, message, silent=None):
"""Logs a message at the appropriate level.
Args:
message:
The message to log.
silent:
If silent, logs will be only info
"""
if silent is None:
silent = self.silent
if silent:
logger.debug(message)
else:
logger.info(message)
def _matching(self) -> tuple:
"""Performs matching considering the presence of groups.
Returns:
Results of matching and matching quality metrics
"""
self._create_faiss_matcher()
self._log("Applying matching")
self.results, df_matched = self.matcher.match()
self.quality_result = self.matcher.matching_quality(df_matched)
return self.results, self.quality_result, df_matched
def validate_result(
self, refuter: str = "random_feature", effect_type: str = "ate", n_sim: int = 10, fraction: float = 0.8
) -> dict:
"""Validates estimated ATE (Average Treatment Effect).
Validates estimated effect:
1) by replacing real treatment with random placebo treatment.
            Estimated effect must drop to zero, p-val > 0.05;
2) by adding random feature (`random_feature`). Estimated effect shouldn't change
significantly, p-val < 0.05;
3) estimates effect on subset of data (default fraction is 0.8). Estimated effect
shouldn't change significantly, p-val < 0.05.
Args:
refuter:
Refuter type (`random_treatment`, `random_feature`, `subset_refuter`)
effect_type:
Which effect to validate (`ate`, `att`, `atc`)
n_sim:
Number of simulations
fraction:
Subset fraction for subset refuter only
Returns:
Dictionary of outcome_name (mean_effect on validation, p-value)
"""
if self.silent:
logger.debug("Applying validation of result")
else:
logger.info("Applying validation of result")
self.val_dict = {k: [] for k in self.outcomes}
self.pval_dict = dict()
effect_dict = {"ate": 0, "atc": 1, "att": 2}
assert effect_type in effect_dict.keys()
for i in tqdm(range(n_sim)):
if refuter in ["random_treatment", "random_feature"]:
if refuter == "random_treatment":
self.input_data, orig_treatment, self.validate = random_treatment(self.input_data, self.treatment)
elif refuter == "random_feature":
self.input_data, self.validate = random_feature(self.input_data)
if self.features_importance is not None and i == 0:
self.features_importance.append("random_feature")
self.matcher = FaissMatcher(
self.input_data,
self.outcomes,
self.treatment,
info_col=self.info_col,
features=self.features_importance,
group_col=self.group_col,
validation=self.validate,
n_neighbors=self.n_neighbors,
pbar=False,
)
elif refuter == "subset_refuter":
df, self.validate = subset_refuter(self.input_data, self.treatment, fraction)
self.matcher = FaissMatcher(
df,
self.outcomes,
self.treatment,
info_col=self.info_col,
features=self.features_importance,
group_col=self.group_col,
validation=self.validate,
n_neighbors=self.n_neighbors,
pbar=False,
)
else:
logger.error("Incorrect refuter name")
raise NameError(
"Incorrect refuter name! Available refuters: `random_feature`, `random_treatment`, `subset_refuter`"
)
if self.group_col is None:
sim = self.matcher.match()
else:
sim = self.matcher.group_match()
for key in self.val_dict.keys():
self.val_dict[key].append(sim[key][0])
for outcome in self.outcomes:
self.pval_dict.update({outcome: [np.mean(self.val_dict[outcome])]})
self.pval_dict[outcome].append(
test_significance(
self.results.query("outcome==@outcome").loc[effect_type.upper()]["effect_size"],
self.val_dict[outcome],
)
)
if refuter == "random_treatment":
self.input_data[self.treatment] = orig_treatment
elif refuter == "random_feature":
self.input_data = self.input_data.drop(columns="random_feature")
if self.features_importance is not None:
self.features_importance.remove("random_feature")
return self.pval_dict
def estimate(self, features: list = None) -> tuple:
"""Performs matching via Mahalanobis distance.
Args:
features:
List or feature_importances from LAMA of features for matching
Returns:
Results of matching and matching quality metrics
"""
if features is not None:
self.features_importance = features
return self._matching()
def save(self, filename):
"""Save the object to a file using pickle.
This method serializes the object and writes it to a file
Args:
filename:
The name of the file to write to.
"""
with open(filename, "wb") as f:
pickle.dump(self, f)
@classmethod
def load(cls, filename):
"""Load an object from a file.
This method reads a file and deserializes the object from it
Args:
filename:
The name of the file to read from.
Returns:
The deserialized object
"""
with open(filename, "rb") as f:
return pickle.load(f)
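# A minimal end-to-end sketch of the class above; the dataframe and the column names used here are assumptions.
def _example_matcher_pipeline(df):
    model = Matcher(input_data=df, treatment="treatment", outcome="target")
    results, quality, df_matched = model.estimate()                  # ATE/ATC/ATT table, quality dict, matched pairs
    p_values = model.validate_result(refuter="random_feature", n_sim=10)
    model.save("matcher.pkl")                                        # pickle the fitted object for later reuse
    return results, quality, df_matched, p_values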
# Path: hypex/utils/tutorial_data_creation.py
def create_test_data(
num_users: int = 10000,
na_step: Union[Iterable[int], int] = None,
nan_cols: Union[Iterable[str], str] = None,
file_name: str = None,
rs=None
):
"""Creates data for tutorial.
Args:
        num_users: number of users to generate
        na_step:
            num or list of nums - the step of the range used to insert NaN values
            If a list, it is applied to the columns in the corresponding order
        nan_cols:
            name of one or several columns to fill with NaN
            If a list, it is iterated in the same order as na_step
        file_name: name of the file to save; doesn't save the file if None
    Returns:
        data: the generated dataframe
"""
if rs is not None:
np.random.seed(rs)
if (nan_cols is not None) and isinstance(nan_cols, str):
nan_cols = [nan_cols]
# Simulating dataset with known effect size
num_months = 12
# signup_months == 0 means customer did not sign up
signup_months = np.random.choice(np.arange(1, num_months), num_users) * np.random.randint(0, 2, size=num_users)
data = pd.DataFrame(
{
"user_id": np.repeat(np.arange(num_users), num_months),
"signup_month": np.repeat(signup_months, num_months), # signup month == 0 means customer did not sign up
"month": np.tile(np.arange(1, num_months + 1), num_users), # months are from 1 to 12
"spend": np.random.poisson(500, num_users * num_months),
}
)
# A customer is in the treatment group if and only if they signed up
data["treat"] = data["signup_month"] > 0
# Simulating an effect of month (monotonically decreasing--customers buy less later in the year)
data["spend"] = data["spend"] - data["month"] * 10
# Simulating a simple treatment effect of 100
after_signup = (data["signup_month"] < data["month"]) & (data["treat"])
data.loc[after_signup, "spend"] = data[after_signup]["spend"] + 100
# Setting the signup month (for ease of analysis)
i = 3
data = (
data[data.signup_month.isin([0, i])]
.groupby(["user_id", "signup_month", "treat"])
.apply(
lambda x: pd.Series(
{"pre_spends": x.loc[x.month < i, "spend"].mean(), "post_spends": x.loc[x.month > i, "spend"].mean(), }
)
)
.reset_index()
)
# Additional category features
gender_i = np.random.choice(a=[0, 1], size=data.user_id.nunique())
gender = [["M", "F"][i] for i in gender_i]
age = np.random.choice(a=range(18, 70), size=data.user_id.nunique())
industry_i = np.random.choice(a=range(1, 3), size=data.user_id.nunique())
industry_names = ["Finance", "E-commerce", "Logistics"]
industry = [industry_names[i] for i in industry_i]
data["age"] = age
data["gender"] = gender
data["industry"] = industry
data["industry"] = data["industry"].astype("str")
data["treat"] = data["treat"].astype(int)
# input nans in data if needed
data = set_nans(data, na_step, nan_cols)
if file_name is not None:
data.to_csv(ROOT / f"{file_name}.csv", index=False)
return data
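# A short usage sketch, assuming we want NaNs inserted into the "age" column on every 10th row:
def _example_create_test_data():
    df = create_test_data(num_users=1000, na_step=10, nan_cols="age", rs=42)
    # columns produced above: user_id, signup_month, treat, pre_spends, post_spends, age, gender, industry
    return df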
# Path: tests/test_matcher.py
import pandas as pd
import sys
from pathlib import Path
from hypex import Matcher
from hypex.utils.tutorial_data_creation import create_test_data
ROOT = Path("").absolute().parents[0]
sys.path.append(str(ROOT))
# add a date column to the data and fix the bugs related to it
# handle the case where info_col is passed as a single-value list or as a string
def create_model(group_col: str = None):
data = pd.read_csv(ROOT / "Tutorial_data.csv")
info_col = ["user_id", "signup_month"]
outcome = "post_spends"
treatment = "treat"
model = Matcher(input_data=data, outcome=outcome, treatment=treatment, info_col=info_col, group_col=group_col)
return model
def test_matcher_pos():
model = create_model()
res, quality_res, df_matched = model.estimate()
    assert len(model.quality_result.keys()) == 4, "quality results do not return four metrics"
assert list(model.quality_result.keys()) == ["psi", "ks_test", "smd", "repeats"], "metrics renamed"
assert list(model.results.index) == ["ATE", "ATC", "ATT"], "format of results is changed: type of effects"
assert list(model.results.columns) == [
"effect_size",
"std_err",
"p-val",
"ci_lower",
"ci_upper",
"post_spends",
], "format of results is changed: columns in report"
| assert model.results["p-val"].values[0] <= 0.05, "p-value on ATE is greater than 0.1" |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: TianrongChen/DMSB
# Path: metrics.py
class MMD_loss(torch.nn.Module):
'''
fork from: https://github.com/ZongxianLee/MMD_Loss.Pytorch
'''
def __init__(self, kernel_mul = 2.0, kernel_num = 5):
super(MMD_loss, self).__init__()
self.kernel_num = kernel_num
self.kernel_mul = kernel_mul
self.fix_sigma = None
return
def guassian_kernel(self, source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
n_samples = int(source.size()[0])+int(target.size()[0])
total = torch.cat([source, target], dim=0)
total0 = total.unsqueeze(0).expand(int(total.size(0)), int(total.size(0)), int(total.size(1)))
total1 = total.unsqueeze(1).expand(int(total.size(0)), int(total.size(0)), int(total.size(1)))
L2_distance = ((total0-total1)**2).sum(2)
if fix_sigma:
bandwidth = fix_sigma
else:
bandwidth = torch.sum(L2_distance.data) / (n_samples**2-n_samples)
bandwidth /= kernel_mul ** (kernel_num // 2)
bandwidth_list = [bandwidth * (kernel_mul**i) for i in range(kernel_num)]
kernel_val = [torch.exp(-L2_distance / bandwidth_temp) for bandwidth_temp in bandwidth_list]
return sum(kernel_val)
def forward(self, source, target):
batch_size = int(source.size()[0])
kernels = self.guassian_kernel(source, target, kernel_mul=self.kernel_mul, kernel_num=self.kernel_num, fix_sigma=self.fix_sigma)
XX = kernels[:batch_size, :batch_size]
YY = kernels[batch_size:, batch_size:]
XY = kernels[:batch_size, batch_size:]
YX = kernels[batch_size:, :batch_size]
loss = torch.mean(XX + YY - XY -YX)
return loss
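# A minimal usage sketch, assuming two batches of 100 samples from an 8-dimensional marginal:
def _example_mmd_usage():
    mmd = MMD_loss(kernel_mul=2.0, kernel_num=5)
    source = torch.randn(100, 8)      # e.g. generated samples
    target = torch.randn(100, 8)      # e.g. reference samples
    return mmd(source, target)        # scalar tensor, close to 0 when the two distributions match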
# Path: metrics.py
def compute_metrics(opt, pred_traj, ref_data, metrics, runner,stage):
'''
pred_traj: [batch_size, interval, data_dim] torch.Tensor
    ref_data: [num_dist, batch_size, data_dim], torch.Tensor; we use the whole reference data, similar to FID computation.
    The reference data and predictions are all the marginals. We delete the leave-one-out (--LOO) marginal during training, but we still evaluate all of them here.
'''
sample_size = 1000
dist_time = np.linspace(0, opt.interval-1, opt.num_dist).astype(int) #we delete a distribution when LOO during training, so num_dist is same as original marginal
pred_idx = np.random.choice(pred_traj.shape[0], sample_size, replace=False) #random sample from batch
pred_data = pred_traj[pred_idx][:,dist_time,0:opt.data_dim[0]] # [samp_bs, num_dist, data_dim]
pred_data = pred_data.transpose(1,0,2)/opt.data_scale # [num_dist, samp_bs, data_dim]
for metric_idx, metric in enumerate(metrics): #loop over metrics
avg_metric = 0
for idx,(pred,ref) in enumerate(zip(pred_data, ref_data)):
if idx==0:
            continue # The first marginal does not need to be evaluated; we do not generate it, it is just the ground truth.
if opt.metrics[metric_idx] == 'MMD':
ref_idx = np.random.choice(ref.shape[0], sample_size, replace=False)
ref = torch.Tensor(ref[ref_idx])
pred = torch.Tensor(pred)
loss = metric(pred,ref)
avg_metric += loss
print(util.green('{} for time{} is {}'.format(opt.metrics[metric_idx], idx,loss)))
runner.log_tb(stage, loss, '{}_t{}'.format(opt.metrics[metric_idx],idx),'SB_forward')
avg_metric = avg_metric/(opt.num_dist-1)
print('AVERAGE {} IS {}'.format(opt.metrics[metric_idx],avg_metric))
runner.log_tb(stage, avg_metric, '{}_avg'.format(opt.metrics[metric_idx]), 'SB_forward')
return pred_data
# Path: metrics.py
def metric_build(opt):
metrics = {
'SWD':sliced_wasserstein_distance,
'MMD':MMD_loss(),
'MWD':max_sliced_wasserstein_distance
}
return [metrics.get(key) for key in opt.metrics]
# Path: loss.py
def compute_sb_DSB_train(opt, label, label_aux,dyn, ts, ms, policy_opt, return_z=False, itr=None):
""" Implementation of Eq (18,19) in our main paper.
"""
dt = dyn.dt
zs = policy_opt(ms,ts)
g_ts = dyn.g(ts)
g_ts = g_ts[:,None,None,None] if util.is_image_dataset(opt) else g_ts[:,None]
loss = torch.nn.functional.mse_loss(g_ts*dt*zs,label)
    return (loss, zs) if return_z else loss
# Path: runner.py
import os, time, gc
import numpy as np
import torch
import torch.nn.functional as F
import policy
import sde
import data
import util
from torch.optim import SGD, RMSprop, Adagrad, AdamW, lr_scheduler, Adam
from torch.utils.tensorboard import SummaryWriter
from torch_ema import ExponentialMovingAverage
from metrics import MMD_loss,compute_metrics,metric_build
from loss import compute_sb_DSB_train
from ipdb import set_trace as debug
self.sb_alternate_train_stage(
opt, stage, boundry_ep, 'backward', rollout = [0,opt.num_dist-1], resample=True #Train backward Kboundary
)
self.sb_alternate_train_stage(
opt, stage, bridge_ep, 'forward', rollout = [0,opt.num_dist-1], resample=False #Train K bridge forward
)
reused_sampler = self.evaluate(opt, stage+1, rollout = [0,opt.num_dist-1],resample=False)
if opt.log_tb: self.writer.close()
def sb_alternate_train_stage(self, opt, stage, epoch, direction, reused_sampler=None, rollout=False, resample=True):
policy_opt, policy_impt = {
            'forward': [self.z_f, self.z_b], # train forward, sample from backward
'backward': [self.z_b, self.z_f], # train backward, sample from forward
}.get(direction)
for ep in range(epoch):
# prepare training data
train_ms, train_zs, train_ts, train_labels = self.sample_train_data(
opt, policy_opt, policy_impt, reused_sampler, rollout=rollout, resample=resample
)
# train one epoch
policy_impt = freeze_policy(policy_impt)
policy_opt = activate_policy(policy_opt)
self.DSB_alternate_train_ep(
opt, ep, stage, direction, train_ms, train_zs, train_ts, train_labels, policy_opt, epoch
)
def DSB_alternate_train_ep(
self, opt, ep, stage, direction, train_xs, train_zs, train_ts, train_labels, policy, num_epoch
):
assert train_xs.shape[0] == opt.samp_bs
assert train_zs.shape[0] == opt.samp_bs
assert direction == policy.direction
optimizer, ema, sched = self.get_optimizer_ema_sched(policy)
use_amp=opt.use_amp
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
for it in range(opt.num_itr):
# -------- sample x_idx and t_idx \in [0, interval] --------
samp_m_idx = torch.randint(opt.samp_bs, (opt.train_bs_x,),device='cpu')
samp_t_idx = util.time_sample(opt.interval, policy.direction, opt.train_bs_t)
if opt.use_arange_t: samp_t_idx = util.time_arange(train_ts.shape[0], policy.direction)
# -------- build sample --------
sign=1 if policy.direction=='forward' else -1
ts = train_ts[samp_t_idx].detach().to(opt.device)
ms = train_xs[samp_m_idx][:, samp_t_idx, ...].to(opt.device)
zs_impt = train_zs[samp_m_idx][:, samp_t_idx+sign, ...].to(opt.device)
train_label = train_labels[samp_m_idx][:, samp_t_idx+sign, ...].to(opt.device)
optimizer.zero_grad(set_to_none=True)
# -------- handle for batch_x and batch_t ---------
# (batch, T, xdim) --> (batch*T, xdim)
ms = util.flatten_dim01(ms)
zs_impt = util.flatten_dim01(zs_impt)
train_label = util.flatten_dim01(train_label)
ts = ts.repeat(opt.train_bs_x)
assert ms.shape[0] == ts.shape[0]
assert zs_impt.shape[0] == ts.shape[0]
# -------- compute loss and backprop --------
with torch.cuda.amp.autocast(enabled=use_amp):
loss, zs = compute_sb_DSB_train(
opt, train_label, zs_impt,self.dyn, ts, ms, policy, return_z=True,itr=it
)
assert not torch.isnan(loss)
scaler.scale(loss).backward()
if opt.grad_clip is not None:
torch.nn.utils.clip_grad_norm(policy.parameters(), opt.grad_clip)
scaler.step(optimizer)
scaler.update()
optimizer.step()
ema.update()
if sched is not None: sched.step()
# -------- logging --------
zs = util.unflatten_dim01(zs, [len(samp_m_idx), len(samp_t_idx)])
zs_impt = zs_impt.reshape(zs.shape)
self.log_sb_alternate_train(
opt, it, ep, stage, loss, optimizer, direction, num_epoch
)
@torch.no_grad()
def evaluate(self, opt, stage, rollout=None, resample=False, ode_samp=False):
corrector = (lambda x,t: self.z_f(x,t) + self.z_b(x,t)) if opt.use_corrector else None
ODE_drift = (lambda x,t: 0.5*(self.z_b(x,t) - self.z_f(x,t))) if ode_samp else None
snapshot, ckpt = util.evaluate_stage(opt, stage)
snapshot=True
if ckpt:
self.v_dists = self.dyn.prev_v_boundary
keys = ['z_f','optimizer_f','ema_f','z_b','optimizer_b','ema_b','v_dists']
util.save_checkpoint(opt, self, keys, stage)
if snapshot:
print(util.blue('======Ploting visualization image======'))
for z in [self.z_b, self.z_f]:
z = freeze_policy(z)
ms, _, _, _,_ = self.dyn.sample_traj(
self.ts,
z,
save_traj=True,
corrector=corrector,
rollout=rollout,
resample=resample,
test=True,
ode_drift= ODE_drift
)
fn = "{}/xs-stage{}-{}".format(z.direction, stage,z.direction)
if opt.problem_name =='semicircle':
| util.save_toy_traj( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mileswyn/SAMIHS
# Path: models/segment_anything_samihs/modeling/samihs.py
class Samihs(nn.Module):
mask_threshold: float = 0.0
image_format: str = "RGB"
def __init__(
self,
image_encoder: ImageEncoderViT,
prompt_encoder: PromptEncoder,
mask_decoder: MaskDecoder,
pixel_mean: List[float] = [123.675, 116.28, 103.53],
pixel_std: List[float] = [58.395, 57.12, 57.375],
) -> None:
"""
SAM predicts object masks from an image and input prompts.
Arguments:
image_encoder (ImageEncoderViT): The backbone used to encode the
image into image embeddings that allow for efficient mask prediction.
prompt_encoder (PromptEncoder): Encodes various types of input prompts.
mask_decoder (MaskDecoder): Predicts masks from the image embeddings
and encoded prompts.
pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
pixel_std (list(float)): Std values for normalizing pixels in the input image.
"""
super().__init__()
self.image_encoder = image_encoder
self.prompt_encoder = prompt_encoder
self.mask_decoder = mask_decoder
self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
####################################################
# for param in self.prompt_encoder.parameters():
# param.requires_grad = False
# for param in self.mask_decoder.parameters():
# param.requires_grad = False
####################################################
# for param in self.image_encoder.parameters():
# param.requires_grad = False
for n, value in self.image_encoder.named_parameters():
if "down_projection" not in n and "Adapter" not in n:
value.requires_grad = False
if "down_projection" in n or "adapter" in n:
value.requires_grad = True
@property
def device(self) -> Any:
return self.pixel_mean.device
@torch.no_grad()
def forward_sam(
self,
batched_input: List[Dict[str, Any]],
multimask_output: bool,
) -> List[Dict[str, torch.Tensor]]:
"""
Predicts masks end-to-end from provided images and prompts.
If prompts are not known in advance, using SamPredictor is
recommended over calling the model directly.
Arguments:
batched_input (list(dict)): A list over input images, each a
dictionary with the following keys. A prompt key can be
excluded if it is not present.
'image': The image as a torch tensor in 3xHxW format,
already transformed for input to the model.
'original_size': (tuple(int, int)) The original size of
the image before transformation, as (H, W).
'point_coords': (torch.Tensor) Batched point prompts for
this image, with shape BxNx2. Already transformed to the
input frame of the model.
'point_labels': (torch.Tensor) Batched labels for point prompts,
with shape BxN.
'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.
Already transformed to the input frame of the model.
'mask_inputs': (torch.Tensor) Batched mask inputs to the model,
in the form Bx1xHxW.
multimask_output (bool): Whether the model should predict multiple
disambiguating masks, or return a single mask.
Returns:
(list(dict)): A list over input images, where each element is
as dictionary with the following keys.
'masks': (torch.Tensor) Batched binary mask predictions,
with shape BxCxHxW, where B is the number of input prompts,
C is determined by multimask_output, and (H, W) is the
original size of the image.
'iou_predictions': (torch.Tensor) The model's predictions
of mask quality, in shape BxC.
'low_res_logits': (torch.Tensor) Low resolution logits with
shape BxCxHxW, where H=W=256. Can be passed as mask input
to subsequent iterations of prediction.
"""
input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0)
image_embeddings, skip_cache = self.image_encoder(input_images)
outputs = []
for image_record, curr_embedding in zip(batched_input, image_embeddings):
if "point_coords" in image_record:
points = (image_record["point_coords"], image_record["point_labels"])
else:
points = None
sparse_embeddings, dense_embeddings = self.prompt_encoder(
points=points,
boxes=image_record.get("boxes", None),
masks=image_record.get("mask_inputs", None),
)
low_res_masks, iou_predictions = self.mask_decoder(
image_embeddings=curr_embedding.unsqueeze(0),
image_pe=self.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
encoder_cache=skip_cache,
)
masks = self.postprocess_masks(
low_res_masks,
input_size=image_record["image"].shape[-2:],
original_size=image_record["original_size"],
)
masks = masks > self.mask_threshold
outputs.append(
{
"masks": masks,
"iou_predictions": iou_predictions,
"low_res_logits": low_res_masks,
}
)
return outputs
def forward(
self,
imgs: torch.Tensor,
pt: Tuple[torch.Tensor, torch.Tensor], # [b n 2, b n]
bbox: torch.Tensor=None, # b 4
) -> torch.Tensor:
# imge, skip_cache = self.image_encoder(imgs)
imge = self.image_encoder(imgs)
if len(pt[0].shape) == 3:
se, de = self.prompt_encoder( # se b 2 256, de b 256 32 32
points=pt,
boxes=None,
masks=None,
)
low_res_masks, _ = self.mask_decoder( # low_res_mask b 1 128 128
image_embeddings=imge,
image_pe=self.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=se,
dense_prompt_embeddings=de,
multimask_output=False,
# encoder_cache=skip_cache,
)
masks = F.interpolate(low_res_masks, (256, 256), mode="bilinear", align_corners=False)
outputs = {"low_res_logits": low_res_masks, "masks": low_res_masks} # 10.10
return outputs
else:
low_res_masks, masks = [], []
for i in range(pt[0].shape[1]):
pti = (pt[0][:, i, :, :], pt[1][:, i, :])
sei, dei = self.prompt_encoder( # se b 2 256, de b 256 32 32
points=pti,
boxes=None,
masks=None,
)
low_res_masksi, _ = self.mask_decoder( # low_res_mask b 1 128 128
image_embeddings=imge,
image_pe=self.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sei,
dense_prompt_embeddings=dei,
multimask_output=False,
)
masksi = F.interpolate(low_res_masksi, (256, 256), mode="bilinear", align_corners=False)
low_res_masks.append(low_res_masksi)
masks.append(masksi)
low_res_masks = torch.stack(low_res_masks, dim=1)
masks = torch.stack(masks, dim=1) # b c 1 255 255
masks = masks.reshape(masks.shape[0], -1, masks.shape[3], masks.shape[4])
low_res_masks = low_res_masks.reshape(low_res_masks.shape[0], -1, low_res_masks.shape[3], low_res_masks.shape[4])
outputs = {"low_res_logits": low_res_masks, "masks": masks}
return outputs
def postprocess_masks(
self,
masks: torch.Tensor,
input_size: Tuple[int, ...],
original_size: Tuple[int, ...],
) -> torch.Tensor:
"""
Remove padding and upscale masks to the original image size.
Arguments:
masks (torch.Tensor): Batched masks from the mask_decoder,
in BxCxHxW format.
input_size (tuple(int, int)): The size of the image input to the
model, in (H, W) format. Used to remove padding.
original_size (tuple(int, int)): The original size of the image
before resizing for input to the model, in (H, W) format.
Returns:
(torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
is given by original_size.
"""
masks = F.interpolate(
masks,
(self.image_encoder.img_size, self.image_encoder.img_size),
mode="bilinear",
align_corners=False,
)
masks = masks[..., : input_size[0], : input_size[1]]
masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False)
return masks
def preprocess(self, x: torch.Tensor) -> torch.Tensor:
"""Normalize pixel values and pad to a square input."""
# Normalize colors
x = (x - self.pixel_mean) / self.pixel_std
# Pad
h, w = x.shape[-2:]
padh = self.image_encoder.img_size - h
padw = self.image_encoder.img_size - w
x = F.pad(x, (0, padw, 0, padh))
return x
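# A minimal inference sketch for the point-prompt branch of forward(); the 256x256 input size is an assumption.
def _example_samihs_point_prompt(model: Samihs):
    imgs = torch.randn(1, 3, 256, 256)                 # b x 3 x H x W, already normalized/resized
    point_coords = torch.tensor([[[128.0, 128.0]]])    # b x n x 2, in input-frame pixel coordinates
    point_labels = torch.ones(1, 1)                    # b x n, 1 = foreground point
    out = model(imgs, (point_coords, point_labels))
    return out["masks"], out["low_res_logits"]         # mask logits from the returned dict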
# Path: models/segment_anything_samihs/utils/amg.py
class MaskData:
"""
A structure for storing masks and their related data in batched format.
Implements basic filtering and concatenation.
"""
def __init__(self, **kwargs) -> None:
for v in kwargs.values():
assert isinstance(
v, (list, np.ndarray, torch.Tensor)
), "MaskData only supports list, numpy arrays, and torch tensors."
self._stats = dict(**kwargs)
def __setitem__(self, key: str, item: Any) -> None:
assert isinstance(
item, (list, np.ndarray, torch.Tensor)
), "MaskData only supports list, numpy arrays, and torch tensors."
self._stats[key] = item
def __delitem__(self, key: str) -> None:
del self._stats[key]
def __getitem__(self, key: str) -> Any:
return self._stats[key]
def items(self) -> ItemsView[str, Any]:
return self._stats.items()
def filter(self, keep: torch.Tensor) -> None:
for k, v in self._stats.items():
if v is None:
self._stats[k] = None
elif isinstance(v, torch.Tensor):
self._stats[k] = v[torch.as_tensor(keep, device=v.device)]
elif isinstance(v, np.ndarray):
self._stats[k] = v[keep.detach().cpu().numpy()]
elif isinstance(v, list) and keep.dtype == torch.bool:
self._stats[k] = [a for i, a in enumerate(v) if keep[i]]
elif isinstance(v, list):
self._stats[k] = [v[i] for i in keep]
else:
raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
def cat(self, new_stats: "MaskData") -> None:
for k, v in new_stats.items():
if k not in self._stats or self._stats[k] is None:
self._stats[k] = deepcopy(v)
elif isinstance(v, torch.Tensor):
self._stats[k] = torch.cat([self._stats[k], v], dim=0)
elif isinstance(v, np.ndarray):
self._stats[k] = np.concatenate([self._stats[k], v], axis=0)
elif isinstance(v, list):
self._stats[k] = self._stats[k] + deepcopy(v)
else:
raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
def to_numpy(self) -> None:
for k, v in self._stats.items():
if isinstance(v, torch.Tensor):
self._stats[k] = v.detach().cpu().numpy()
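# A small sketch of filtering and concatenation with MaskData; the field names here are illustrative.
def _example_maskdata_usage():
    data = MaskData(iou_preds=torch.tensor([0.9, 0.4, 0.8]), boxes=torch.zeros(3, 4))
    data.filter(data["iou_preds"] > 0.5)    # keeps rows 0 and 2 in every stored field
    data.cat(MaskData(iou_preds=torch.tensor([0.7]), boxes=torch.zeros(1, 4)))
    return data["iou_preds"]                # tensor([0.9000, 0.8000, 0.7000])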
# Path: models/segment_anything_samihs/utils/amg.py
def area_from_rle(rle: Dict[str, Any]) -> int:
return sum(rle["counts"][1::2])
# Path: models/segment_anything_samihs/utils/amg.py
def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
assert len(args) > 0 and all(
len(a) == len(args[0]) for a in args
), "Batched iteration must have inputs of all the same size."
n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
for b in range(n_batches):
yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
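# A tiny usage sketch: iterate 5 aligned points/labels in batches of 2 (the last batch holds 1 element).
def _example_batch_iterator():
    points = np.arange(10).reshape(5, 2)
    labels = np.ones(5)
    return [len(p) for p, _ in batch_iterator(2, points, labels)]   # [2, 2, 1]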
# Path: models/segment_anything_samihs/utils/amg.py
def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
"""
Calculates boxes in XYXY format around masks. Return [0,0,0,0] for
an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
"""
# torch.max below raises an error on empty inputs, just skip in this case
if torch.numel(masks) == 0:
return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
# Normalize shape to CxHxW
shape = masks.shape
h, w = shape[-2:]
if len(shape) > 2:
masks = masks.flatten(0, -3)
else:
masks = masks.unsqueeze(0)
# Get top and bottom edges
in_height, _ = torch.max(masks, dim=-1)
in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
bottom_edges, _ = torch.max(in_height_coords, dim=-1)
in_height_coords = in_height_coords + h * (~in_height)
top_edges, _ = torch.min(in_height_coords, dim=-1)
# Get left and right edges
in_width, _ = torch.max(masks, dim=-2)
in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
right_edges, _ = torch.max(in_width_coords, dim=-1)
in_width_coords = in_width_coords + w * (~in_width)
left_edges, _ = torch.min(in_width_coords, dim=-1)
# If the mask is empty the right edge will be to the left of the left edge.
# Replace these boxes with [0, 0, 0, 0]
empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
out = out * (~empty_filter).unsqueeze(-1)
# Return to original shape
if len(shape) > 2:
out = out.reshape(*shape[:-2], 4)
else:
out = out[0]
return out
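# A toy sketch: one 4x4 mask whose "on" pixels cover rows 1-2 and columns 1-3 -> XYXY box [1, 1, 3, 2].
def _example_batched_mask_to_box():
    mask = torch.zeros(1, 4, 4, dtype=torch.bool)
    mask[0, 1:3, 1:4] = True
    return batched_mask_to_box(mask)   # tensor([[1, 1, 3, 2]])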
# Path: models/segment_anything_samihs/utils/amg.py
def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:
box_xywh = deepcopy(box_xyxy)
box_xywh[2] = box_xywh[2] - box_xywh[0]
box_xywh[3] = box_xywh[3] - box_xywh[1]
return box_xywh
# Path: models/segment_anything_samihs/utils/amg.py
def build_all_layer_point_grids(
n_per_side: int, n_layers: int, scale_per_layer: int
) -> List[np.ndarray]:
"""Generates point grids for all crop layers."""
points_by_layer = []
for i in range(n_layers + 1):
n_points = int(n_per_side / (scale_per_layer**i))
points_by_layer.append(build_point_grid(n_points))
return points_by_layer
# Path: models/segment_anything_samihs/utils/amg.py
def calculate_stability_score(
masks: torch.Tensor, mask_threshold: float, threshold_offset: float
) -> torch.Tensor:
"""
Computes the stability score for a batch of masks. The stability
score is the IoU between the binary masks obtained by thresholding
the predicted mask logits at high and low values.
"""
# One mask is always contained inside the other.
# Save memory by preventing unnecessary cast to torch.int64
intersections = (
(masks > (mask_threshold + threshold_offset))
.sum(-1, dtype=torch.int16)
.sum(-1, dtype=torch.int32)
)
unions = (
(masks > (mask_threshold - threshold_offset))
.sum(-1, dtype=torch.int16)
.sum(-1, dtype=torch.int32)
)
return intersections / unions
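# A toy sketch of the stability score: IoU of the masks thresholded at 0+1 and 0-1.
def _example_stability_score():
    logits = torch.tensor([[[2.0, 0.5, -0.5, -2.0]]])   # one 1x4 "mask" of logits
    # > +1 keeps 1 pixel; > -1 keeps 3 pixels, so the score is 1/3
    return calculate_stability_score(logits, mask_threshold=0.0, threshold_offset=1.0)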
# Path: models/segment_anything_samihs/utils/amg.py
def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:
from pycocotools import mask as mask_utils # type: ignore
h, w = uncompressed_rle["size"]
rle = mask_utils.frPyObjects(uncompressed_rle, h, w)
rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json
return rle
# Path: models/segment_anything_samihs/utils/amg.py
def generate_crop_boxes(
im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
) -> Tuple[List[List[int]], List[int]]:
"""
Generates a list of crop boxes of different sizes. Each layer
has (2**i)**2 boxes for the ith layer.
"""
crop_boxes, layer_idxs = [], []
im_h, im_w = im_size
short_side = min(im_h, im_w)
# Original image
crop_boxes.append([0, 0, im_w, im_h])
layer_idxs.append(0)
def crop_len(orig_len, n_crops, overlap):
return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))
for i_layer in range(n_layers):
n_crops_per_side = 2 ** (i_layer + 1)
overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
crop_w = crop_len(im_w, n_crops_per_side, overlap)
crop_h = crop_len(im_h, n_crops_per_side, overlap)
crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]
# Crops in XYWH format
for x0, y0 in product(crop_box_x0, crop_box_y0):
box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
crop_boxes.append(box)
layer_idxs.append(i_layer + 1)
return crop_boxes, layer_idxs
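# A small sketch: a 512x512 image with one crop layer yields the full image plus (2**1)**2 = 4 crops.
def _example_generate_crop_boxes():
    boxes, layer_idxs = generate_crop_boxes((512, 512), n_layers=1, overlap_ratio=512 / 1500)
    return len(boxes), layer_idxs   # (5, [0, 1, 1, 1, 1])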
# Path: models/segment_anything_samihs/utils/amg.py
def is_box_near_crop_edge(
boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0
) -> torch.Tensor:
"""Filter masks at the edge of a crop, but not at the edge of the original image."""
crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
return torch.any(near_crop_edge, dim=1)
# Path: models/segment_anything_samihs/utils/amg.py
def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
"""
Encodes masks to an uncompressed RLE, in the format expected by
pycoco tools.
"""
# Put in fortran order and flatten h,w
b, h, w = tensor.shape
tensor = tensor.permute(0, 2, 1).flatten(1)
# Compute change indices
diff = tensor[:, 1:] ^ tensor[:, :-1]
change_indices = diff.nonzero()
# Encode run length
out = []
for i in range(b):
cur_idxs = change_indices[change_indices[:, 0] == i, 1]
cur_idxs = torch.cat(
[
torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),
cur_idxs + 1,
torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),
]
)
btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
counts = [] if tensor[i, 0] == 0 else [0]
counts.extend(btw_idxs.detach().cpu().tolist())
out.append({"size": [h, w], "counts": counts})
return out
# Path: models/segment_anything_samihs/utils/amg.py
def remove_small_regions(
mask: np.ndarray, area_thresh: float, mode: str
) -> Tuple[np.ndarray, bool]:
"""
Removes small disconnected regions and holes in a mask. Returns the
mask and an indicator of if the mask has been modified.
"""
import cv2 # type: ignore
assert mode in ["holes", "islands"]
correct_holes = mode == "holes"
working_mask = (correct_holes ^ mask).astype(np.uint8)
n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
sizes = stats[:, -1][1:] # Row 0 is background label
small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
if len(small_regions) == 0:
return mask, False
fill_labels = [0] + small_regions
if not correct_holes:
fill_labels = [i for i in range(n_labels) if i not in fill_labels]
# If every region is below threshold, keep largest
if len(fill_labels) == 0:
fill_labels = [int(np.argmax(sizes)) + 1]
mask = np.isin(regions, fill_labels)
return mask, True
# Path: models/segment_anything_samihs/utils/amg.py
def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
"""Compute a binary mask from an uncompressed RLE."""
h, w = rle["size"]
mask = np.empty(h * w, dtype=bool)
idx = 0
parity = False
for count in rle["counts"]:
mask[idx : idx + count] = parity
idx += count
parity ^= True
mask = mask.reshape(w, h)
return mask.transpose() # Put in C order
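# A toy round trip between the two RLE helpers above (counts are in column-major order, as in pycocotools).
def _example_rle_roundtrip():
    mask = torch.tensor([[0, 1, 1],
                         [0, 0, 1]], dtype=torch.bool).unsqueeze(0)   # 1 x 2 x 3
    rle = mask_to_rle_pytorch(mask)[0]   # {'size': [2, 3], 'counts': [2, 1, 1, 2]}
    return rle_to_mask(rle)              # numpy bool array equal to mask[0]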
# Path: models/segment_anything_samihs/utils/amg.py
def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
x0, y0, _, _ = crop_box
offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
# Check if boxes has a channel dimension
if len(boxes.shape) == 3:
offset = offset.unsqueeze(1)
return boxes + offset
# Path: models/segment_anything_samihs/utils/amg.py
def uncrop_masks(
masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int
) -> torch.Tensor:
x0, y0, x1, y1 = crop_box
if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
return masks
# Coordinate transform masks
pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
pad = (x0, pad_x - x0, y0, pad_y - y0)
return torch.nn.functional.pad(masks, pad, value=0)
# Path: models/segment_anything_samihs/utils/amg.py
def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
x0, y0, _, _ = crop_box
offset = torch.tensor([[x0, y0]], device=points.device)
# Check if points has a channel dimension
if len(points.shape) == 3:
offset = offset.unsqueeze(1)
return points + offset
# Path: models/segment_anything_samihs/automatic_mask_generator.py
import numpy as np
import torch
import cv2 # type: ignore # noqa: F401
from torchvision.ops.boxes import batched_nms, box_area # type: ignore
from typing import Any, Dict, List, Optional, Tuple
from .modeling import Samihs
from .utils.amg import (
MaskData,
area_from_rle,
batch_iterator,
batched_mask_to_box,
box_xyxy_to_xywh,
build_all_layer_point_grids,
calculate_stability_score,
coco_encode_rle,
generate_crop_boxes,
is_box_near_crop_edge,
mask_to_rle_pytorch,
remove_small_regions,
rle_to_mask,
uncrop_boxes_xyxy,
uncrop_masks,
uncrop_points,
)
from pycocotools import mask as mask_utils # type: ignore # noqa: F401
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class SamAutomaticMaskGenerator:
def __init__(
self,
model: Samihs,
points_per_side: Optional[int] = 32,
points_per_batch: int = 64,
pred_iou_thresh: float = 0.88,
stability_score_thresh: float = 0.95,
stability_score_offset: float = 1.0,
box_nms_thresh: float = 0.7,
crop_n_layers: int = 0,
crop_nms_thresh: float = 0.7,
crop_overlap_ratio: float = 512 / 1500,
crop_n_points_downscale_factor: int = 1,
point_grids: Optional[List[np.ndarray]] = None,
| min_mask_region_area: int = 0, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: tianhaowuhz/human-assisting-dex-grasp
# Path: Algorithms/SDE_update.py
def loss_fn_cond(model, x, marginal_prob_fn, sde_fn, is_likelihood_weighting=False, eps=1e-5, device='cuda:0', hand_pcl=False, full_state=None, envs=None, hand_model=None, space='euler', relative=True):
"""
is_likelihood_weighting = True, can potentially improve likelihood-estimation (e.g., for reward learning)
"""
hand_dof_batch, obj_pcl_batch = x
if space == 'riemann':
hand_dof_batch = action2grad(hand_dof_batch, relative=relative)
batchsize = hand_dof_batch.shape[0]
random_t = torch.rand(batchsize, device=device) * (1. - eps) + eps
# random_t = torch.pow(10,-5*random_t)
random_t = random_t.unsqueeze(-1)
z = torch.randn_like(hand_dof_batch)
mu, std = marginal_prob_fn(hand_dof_batch, random_t)
perturbed_hand_dof_batch = mu + z * std
if hand_pcl:
if space == 'riemann':
hand_dof = action2grad(perturbed_hand_dof_batch.clone(), relative=relative, inv=True)
else:
hand_dof = perturbed_hand_dof_batch.clone()
hand_pos_2_w = full_state[:,18:21].clone().to(device).float()
hand_quat_2_w = full_state[:,21:25].clone().to(device).float()
hand_pos_2_h, hand_quat_2_h = envs.transform_target2source(hand_quat_2_w, hand_pos_2_w, hand_quat_2_w, hand_pos_2_w)
ori_hand_dof = envs.dof_norm(hand_dof.clone(),inv=True)
hand_pcl_2h = hand_model.get_hand_pcl(hand_pos=hand_pos_2_h, hand_quat=hand_quat_2_h, hand_dof=ori_hand_dof)
obj_pcl_batch = torch.cat([obj_pcl_batch, hand_pcl_2h.reshape(hand_pcl_2h.size(0),hand_pcl_2h.size(2),hand_pcl_2h.size(1))],2)
output = model((perturbed_hand_dof_batch.reshape(batchsize, -1, 1), obj_pcl_batch), random_t)
total_loss = (output + z / std) ** 2
if is_likelihood_weighting:
_, diffusion_coeff = sde_fn(random_t)
loss_weighting = diffusion_coeff ** 2
node_l2 = torch.sum(total_loss, dim=-1) * loss_weighting
else:
loss_weighting = std ** 2
node_l2 = torch.sum(total_loss * loss_weighting, dim=-1)
loss_ = torch.mean(node_l2)
return loss_
# Path: Algorithms/SDE_update.py
def cond_ode_sampler(
score_model,
prior_fn,
sde_fn,
state,
batch_size=64,
atol=1e-5,
rtol=1e-5,
device='cuda',
eps=1e-5,
t0=1,
num_steps=None,
is_random=True,
denoise=True,
hand_pcl=False,
full_state=None,
envs=None,
hand_model=None,
space='euler',
relative=True,
):
hand_dof_batch, obj_pcl_batch = state
if space == 'riemann':
hand_dof_batch = action2grad(hand_dof_batch, relative=relative)
t0_ = torch.ones(batch_size, device=device)*t0
if is_random:
init_x = prior_fn(hand_dof_batch.shape).to(device) # normal distribution
# init_x = torch.randn_like(hand_dof_batch, device=device) * marginal_prob_std(t0_)
# init_x = -torch.ones_like(hand_dof_batch, device=device)
# init_x = torch.tensor([ 0.0000, -0.7143, -1.0000, 0.0000, -0.7143, -1.0000, 0.0000, -0.7143,
# -1.0000, -1.0000, 0.0000, -0.7143, -1.0000, 0.0000, -1.0000, 0.0000,
# 0.0000, -1.0000,1,1,1,1,1,1,1], device=device).reshape(1,-1)[:,:hand_dof_batch.size(1)].expand_as(hand_dof_batch)
else:
batch_size = hand_dof_batch.size(0)
init_x = hand_dof_batch
# Create the latent code
# init_x = torch.randn_like(hand_dof_batch, device=device) * marginal_prob_std(t0_)
# !!! for dex hand only, set to same init state
# init_x = hand_dof_batch
shape = init_x.shape
state_dim = shape[-1]
def score_eval_wrapper(sample, time_steps):
"""A wrapper of the score-based model for use by the ODE solver."""
with torch.no_grad():
score = score_model(sample, time_steps)
# return score.cpu().numpy().reshape((-1,))
return score.cpu().numpy().reshape(-1)
def ode_func(t, x):
"""The ODE function for use by the ODE solver."""
x = torch.tensor(x.reshape(-1, state_dim)).to(device).float()
time_steps = torch.ones(batch_size, device=device).unsqueeze(1) * t
# if batch_size == 1:
# time_steps = torch.ones(batch_size, device=device).unsqueeze(1) * t
# else:
# time_steps = torch.ones(batch_size, device=device) * t
drift, diffusion = sde_fn(torch.tensor(t))
drift = drift.cpu().numpy()
diffusion = diffusion.cpu().numpy()
if hand_pcl:
hand_dof = x.clone()
hand_pos_2_w = full_state[:,18:21].clone().to(device).float()
hand_quat_2_w = full_state[:,21:25].clone().to(device).float()
hand_pos_2_h, hand_quat_2_h = envs.transform_target2source(hand_quat_2_w, hand_pos_2_w, hand_quat_2_w, hand_pos_2_w)
if space == 'riemann':
hand_dof = action2grad(hand_dof.clone(), relative=relative, inv=True)
else:
hand_dof = perturbed_hand_dof_batch.clone()
ori_hand_dof = envs.dof_norm(hand_dof.clone(),inv=True)
hand_pcl_2h = hand_model.get_hand_pcl(hand_pos=hand_pos_2_h, hand_quat=hand_quat_2_h, hand_dof=ori_hand_dof)
objhand_pcl_batch = torch.cat([obj_pcl_batch, hand_pcl_2h.reshape(hand_pcl_2h.size(0),hand_pcl_2h.size(2),hand_pcl_2h.size(1))],2)
gradient = score_eval_wrapper((x, objhand_pcl_batch), time_steps)
else:
gradient = score_eval_wrapper((x, obj_pcl_batch), time_steps)
# gradient[:6]*=100
# gradient[6:30]*=10
return drift - 0.5 * (diffusion**2) * gradient
# Run the black-box ODE solver.
t_eval = None
if num_steps is not None:
# num_steps, from t0 -> eps
t_eval = np.linspace(t0, eps, num_steps)
res = integrate.solve_ivp(ode_func, (t0, eps), init_x.reshape(-1).cpu().numpy(), rtol=rtol, atol=atol,
method='RK45', t_eval=t_eval)
# process, xs: [total_nodes*3, samples_num]
# clamp for now TODO
# xs = torch.clamp(torch.tensor(res.y, device=device).T, min=-1.0, max=1.0)
xs = torch.tensor(res.y, device=device).T
xs = xs.view(num_steps, hand_dof_batch.shape[0], -1)
# result x: [total_nodes, 3]
x = torch.clamp(torch.tensor(res.y[:, -1], device=device).reshape(shape), min=-1.0, max=1.0)
# x = torch.tensor(res.y[:, -1], device=device).reshape(shape)
# denoise, using the predictor step in P-C sampler
if denoise:
# Reverse diffusion predictor for denoising
vec_eps = torch.ones((x.shape[0], 1), device=x.device) * eps
drift, diffusion = sde_fn(vec_eps)
grad = score_model((x.float(), obj_pcl_batch), vec_eps)
drift = drift - diffusion ** 2 * grad # R-SDE
mean_x = x + drift * ((1 - eps) / (1000 if num_steps is None else num_steps))
x = mean_x
if space=='riemann':
xs = action2grad(xs, inv=True, relative=relative)
x = action2grad(x, inv=True, relative=relative)
return xs, x
# Path: Algorithms/SDE_update.py
def init_sde(sde_mode, min=0.1, max=10.0):
# the SDE-related hyperparameters are copied from https://github.com/yang-song/score_sde_pytorch
if sde_mode == 've':
sigma_min = 0.01
sigma_max = 90
prior_fn = functools.partial(ve_prior, sigma_min=sigma_min, sigma_max=sigma_max)
marginal_prob_fn = functools.partial(ve_marginal_prob, sigma_min=sigma_min, sigma_max=sigma_max)
sde_fn = functools.partial(ve_sde, sigma_min=sigma_min, sigma_max=sigma_max)
elif sde_mode == 'vp':
beta_0 = min
beta_1 = max
print(beta_0, beta_1)
prior_fn = functools.partial(vp_prior, beta_0=beta_0, beta_1=beta_1)
marginal_prob_fn = functools.partial(vp_marginal_prob, beta_0=beta_0, beta_1=beta_1)
sde_fn = functools.partial(vp_sde, beta_0=beta_0, beta_1=beta_1)
elif sde_mode == 'subvp':
beta_0 = 0.1
beta_1 = 20
prior_fn = functools.partial(subvp_prior, beta_0=beta_0, beta_1=beta_1)
marginal_prob_fn = functools.partial(subvp_marginal_prob, beta_0=beta_0, beta_1=beta_1)
sde_fn = functools.partial(subvp_sde, beta_0=beta_0, beta_1=beta_1)
else:
raise NotImplementedError
return prior_fn, marginal_prob_fn, sde_fn
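# A short sketch of how the returned closures are used (mirroring loss_fn_cond above), assuming the 'vp' mode:
def _example_init_sde_usage(hand_dof_batch):
    prior_fn, marginal_prob_fn, sde_fn = init_sde('vp', min=0.1, max=10.0)
    t = torch.rand(hand_dof_batch.shape[0], 1, device=hand_dof_batch.device)
    mu, std = marginal_prob_fn(hand_dof_batch, t)              # perturbation kernel p_t(x | x_0)
    perturbed = mu + torch.randn_like(hand_dof_batch) * std    # noisy sample used for score matching
    drift, diffusion = sde_fn(t)                               # SDE coefficients at time t
    return perturbed, drift, diffusion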
# Path: Algorithms/SDE_update.py
class ExponentialMovingAverage:
"""
Maintains (exponential) moving average of a set of parameters.
"""
def __init__(self, parameters, decay, use_num_updates=True):
"""
Args:
parameters: Iterable of `torch.nn.Parameter`; usually the result of
`model.parameters()`.
decay: The exponential decay.
use_num_updates: Whether to use number of updates when computing
averages.
"""
if decay < 0.0 or decay > 1.0:
raise ValueError('Decay must be between 0 and 1')
self.decay = decay
self.num_updates = 0 if use_num_updates else None
self.shadow_params = [p.clone().detach()
for p in parameters if p.requires_grad]
self.collected_params = []
def update(self, parameters):
"""
Update currently maintained parameters.
Call this every time the parameters are updated, such as the result of
the `optimizer.step()` call.
Args:
parameters: Iterable of `torch.nn.Parameter`; usually the same set of
parameters used to initialize this object.
"""
decay = self.decay
if self.num_updates is not None:
self.num_updates += 1
decay = min(decay, (1 + self.num_updates) / (10 + self.num_updates))
one_minus_decay = 1.0 - decay
with torch.no_grad():
parameters = [p for p in parameters if p.requires_grad]
for s_param, param in zip(self.shadow_params, parameters):
s_param.sub_(one_minus_decay * (s_param - param)) # only update the ema-params
def copy_to(self, parameters):
"""
Copy current parameters into given collection of parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored moving averages.
"""
parameters = [p for p in parameters if p.requires_grad]
for s_param, param in zip(self.shadow_params, parameters):
if param.requires_grad:
param.data.copy_(s_param.data)
def store(self, parameters):
"""
Save the current parameters for restoring later.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
temporarily stored.
"""
self.collected_params = [param.clone() for param in parameters]
def restore(self, parameters):
"""
Restore the parameters stored with the `store` method.
Useful to validate the model with EMA parameters without affecting the
original optimization process. Store the parameters before the
`copy_to` method. After validation (or model saving), use this to
restore the former parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored parameters.
"""
for c_param, param in zip(self.collected_params, parameters):
param.data.copy_(c_param.data)
def state_dict(self):
return dict(decay=self.decay, num_updates=self.num_updates,
shadow_params=self.shadow_params)
def load_state_dict(self, state_dict):
self.decay = state_dict['decay']
self.num_updates = state_dict['num_updates']
self.shadow_params = state_dict['shadow_params']
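# A hedged usage sketch for ExponentialMovingAverage, added for illustration only (the toy
# model, optimizer and decay value are assumptions, not repository code). The pattern is:
# update() after every optimizer step, then store()/copy_to() to evaluate with the averaged
# weights and restore() to resume training with the live weights.
import torch
_toy_net = torch.nn.Linear(4, 4)
_toy_opt = torch.optim.Adam(_toy_net.parameters(), lr=1e-3)
_toy_ema = ExponentialMovingAverage(_toy_net.parameters(), decay=0.999)
for _ in range(3):
    _toy_opt.zero_grad()
    _toy_net(torch.randn(2, 4)).sum().backward()
    _toy_opt.step()
    _toy_ema.update(_toy_net.parameters())   # track the parameters after every step
_toy_ema.store(_toy_net.parameters())        # stash the live weights
_toy_ema.copy_to(_toy_net.parameters())      # evaluate / save with the EMA weights
_toy_ema.restore(_toy_net.parameters())      # put the live weights back for training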
# Path: Networks/SDENets_update.py
class CondScoreModel(nn.Module):
def __init__(self, marginal_prob_func, hidden_dim, embed_dim, state_dim=1,
mode='target', relative=False, pointnet_version='pt2', n_blocks=0, feature_dim_coff=1, space='euler'):
super(CondScoreModel, self).__init__()
self.marginal_prob_func = marginal_prob_func
self.point_feat_dim = 1088
hidden_dim = hidden_dim
embed_dim = embed_dim
self.embed_dim = embed_dim
self.mode = mode
self.pointnet_version = pointnet_version
if relative:
hand_state_dim = 18
if space == 'riemann':
hand_state_dim = 18+18
else:
hand_state_dim = 25
if space == 'riemann':
hand_state_dim = 25+18
self.n_blocks = n_blocks
self.hand_global_enc = nn.Sequential(
nn.Linear(hand_state_dim, hidden_dim),
nn.ReLU(False),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(False),
)
# obj pcl feature encoder
if pointnet_version == 'pt':
self.obj_enc = PointNetEncoder(global_feat=True, feature_transform=False, channel=3) # for pointnet
elif pointnet_version == 'pt2':
self.obj_enc = Pointnet2Backbone(feature_dim_coff=feature_dim_coff) # for pointnet2
# self.obj_enc = PointNetEncoder() # for pointnet2
# self.obj_cat_embed = nn.Embedding(301,512)
if self.n_blocks < 1:
self.obj_global_enc = nn.Sequential(
nn.Linear(1024, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, embed_dim),
nn.ReLU(),
)
self.embed_sigma = nn.Sequential(GaussianFourierProjection(embed_dim=embed_dim),
nn.Linear(embed_dim, embed_dim))
if n_blocks < 1:
self.init_enc = nn.Sequential(
nn.Linear(state_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, self.point_feat_dim),
nn.ReLU(),
)
# cond_dim = hidden_dim*2 + embed_dim*2 # consider wall
if self.mode == 'target':
cond_dim = embed_dim
# self.mhca = MHCA(num_heads=2, inp_dim=self.point_feat_dim, hid_dim=self.point_feat_dim)
''' main backbone '''
# # mlp1
self.mlp1_main = nn.Sequential(
nn.Linear((hidden_dim + embed_dim*2), hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
)
# # mlp2
self.mlp2_main = nn.Sequential(
nn.Linear(hidden_dim + embed_dim*2, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hand_state_dim),
)
else:
self.pre_dense_cond = nn.Linear(1024*feature_dim_coff, hidden_dim)
self.pre_dense_t = nn.Linear(embed_dim, hidden_dim)
# self.pre_gnorm = nn.GroupNorm(32, num_channels=hidden_dim)
for idx in range(n_blocks):
setattr(self, f'b{idx+1}_dense1', nn.Linear(hidden_dim, hidden_dim))
setattr(self, f'b{idx+1}_dense1_t', nn.Linear(embed_dim, hidden_dim))
setattr(self, f'b{idx+1}_dense1_cond', nn.Linear(hidden_dim, hidden_dim))
# setattr(self, f'b{idx+1}_gnorm1', nn.GroupNorm(32, num_channels=hidden_dim))
setattr(self, f'b{idx+1}_dense2', nn.Linear(hidden_dim, hidden_dim))
setattr(self, f'b{idx+1}_dense2_t', nn.Linear(embed_dim, hidden_dim))
setattr(self, f'b{idx+1}_dense2_cond', nn.Linear(hidden_dim, hidden_dim))
# setattr(self, f'b{idx+1}_gnorm2', nn.GroupNorm(32, num_channels=hidden_dim))
self.act = nn.ReLU(False)
self.post_dense = nn.Linear(hidden_dim, hand_state_dim)
def forward(self, batches, t, obj_feature=False):
"""
batches = hand_batch, obj_batch
hand_batch: [bs, 25, 1]
obj_batch: [bs, 3, 1024]
t: [bs] !! not [bs, 1] !!
"""
hand_batch, obj_batch = batches
batch_size = hand_batch.size(0)
hand_dof = hand_batch.size(1)
''' get cond feat'''
# sigma_feat: [num_nodes, embed_dim]
sigma_feat = F.relu(self.embed_sigma(t.squeeze(-1)),inplace=False)
# total_cond_feat: [num_nodes, hidden_dim*2+embed_dim*2]
# obj_feat,_, _ = self.obj_enc(obj_batch.reshape(batch_size,-1,3)) # B x 1024
## no cuda pointnet2
# obj_feat,_ = self.obj_enc(obj_batch) # B x 1024
# obj_feat = self.obj_global_enc(obj_feat)
if self.pointnet_version == 'pt':
obj_feat,_,_ = self.obj_enc(obj_batch.reshape(batch_size,-1,3).permute(0,2,1)) # B x 1024
elif self.pointnet_version == 'pt2':
## cuda pointnet2
obj_feat,_ = self.obj_enc(obj_batch.reshape(batch_size,-1,3)) # B x 1024
## pointnet
if obj_feature:
obj_feat_fr = obj_feat.clone()
if self.n_blocks < 1:
''' get init x feat '''
hand_global_feat = self.hand_global_enc(hand_batch.reshape(batch_size,-1))
obj_feat = self.obj_global_enc(obj_feat.reshape(batch_size,-1))
# obj_feat = torch.arange(0,batch_size,device=hand_batch.device)
# obj_feat = self.obj_cat_embed(obj_feat)
if self.mode == 'target':
total_cond_feat = torch.cat([sigma_feat, obj_feat], dim=-1) #
# total_cond_feat = sigma_feat
''' main backbone of x '''
x = torch.cat([hand_global_feat, total_cond_feat], -1)
x = self.mlp1_main(x)
x = torch.cat([x, total_cond_feat], -1)
x = self.mlp2_main(x)
else:
obj_feat = obj_feat.reshape(batch_size,-1)
obj_feat = self.pre_dense_cond(obj_feat)
x = self.hand_global_enc(hand_batch.reshape(batch_size,-1))
x = x + self.pre_dense_t(sigma_feat)
x = x + obj_feat
# x = self.pre_gnorm(x)
x = self.act(x)
for idx in range(self.n_blocks):
x1 = getattr(self, f'b{idx+1}_dense1')(x)
x1 = x1 + getattr(self, f'b{idx+1}_dense1_t')(sigma_feat)
x1 = x1 + getattr(self, f'b{idx+1}_dense1_cond')(obj_feat)
# x1 = getattr(self, f'b{idx+1}_gnorm1')(x1)
x1 = self.act(x1)
# dropout, maybe
# x1 = self.dropout(x1)
x2 = getattr(self, f'b{idx+1}_dense2')(x1)
x2 = x2 + getattr(self, f'b{idx+1}_dense2_t')(sigma_feat)
x2 = x2 + getattr(self, f'b{idx+1}_dense2_cond')(obj_feat)
# x2 = getattr(self, f'b{idx+1}_gnorm2')(x2)
x2 = self.act(x2)
# dropout, maybe
# x2 = self.dropout(x2)
x = x + x2
x = self.post_dense(x)
# normalize the output
_, std = self.marginal_prob_func(x, t)
x = x / (std + 1e-7)
if obj_feature:
return x, obj_feat_fr
else:
return x
# Path: utils/utils.py
def exists_or_mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
return False
else:
return True
# Path: utils/utils.py
def save_video(env, states, save_path, simulation=False, fps = 50, render_size = 256, suffix='avi'):
# states: [state, ....]
# state: (60, )
imgs = []
for _, state in tqdm(enumerate(states), desc='Saving video'):
# set_trace()
env_id = state[-1].long()
env.set_states(state.unsqueeze(0))
img = env.render(rgb=True,img_size=render_size)[env_id]
imgs.append(img.cpu().numpy())
if suffix == 'gif':
from PIL import Image
images_to_gif(save_path+f'.{suffix}', [Image.fromarray(img[:, :, ::-1], mode='RGB') for img in imgs], fps=len(imgs)//5)
else:
batch_imgs = np.stack(imgs, axis=0)
images_to_video(save_path+f'.{suffix}', batch_imgs, fps, (render_size, render_size))
# Path: utils/utils.py
def get_dict_key(dic, value):
key = list(dic.keys())[list(dic.values()).index(value)]
return key
# Path: utils/utils.py
class DexDataset(Dataset):
def __init__(self, dataset):
self.dataset = dataset
self.data_ot_idx = {}
# set_trace()
self.data_dim = self.dataset.shape[1]
self.data_ot = {}
obj_id = 0
for (idx,data) in enumerate(self.dataset):
# set_trace()
data_id = data[3104]
# print(data_id)
if data_id in self.data_ot_idx:
self.data_ot_idx[data_id].append(idx)
else:
self.data_ot_idx[data_id] = [idx]
self.data_ot[obj_id] = data_id
obj_id+=1
# set_trace()
self.data_grasp_num = np.zeros(len(self.data_ot_idx))
for (i,data_ot_idx_each) in enumerate(self.data_ot_idx):
# set_trace()
self.data_grasp_num[i] = len(self.data_ot_idx[data_ot_idx_each])
        print('data initialized!')
    # need to override
def __len__(self):
return len(self.data_ot_idx)
    # need to override
def __getitem__(self, idx):
# sampled_data = np.zeros(len(idx),self.data_dim)
# set_trace()
sampled_idx = np.random.randint(0, self.data_grasp_num[idx])
# print(idx,sampled_idx)
sampled_data = self.dataset[self.data_ot_idx[self.data_ot[idx]][sampled_idx]]
# set_trace()
return sampled_data
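# A hedged usage sketch for DexDataset, added for illustration only (the array sizes and the
# fake object ids are made up; column 3104 is treated as the object id, following __init__
# above). The dataset exposes one entry per unique object, and each __getitem__ call returns
# one randomly sampled grasp row for that object.
import numpy as np
from torch.utils.data import DataLoader
_fake_data = np.random.rand(100, 3200)
_fake_data[:, 3104] = np.random.randint(0, 5, size=100)   # pretend object ids 0..4
_dex_dataset = DexDataset(_fake_data)
_dex_loader = DataLoader(_dex_dataset, batch_size=4, shuffle=True)
_grasp_batch = next(iter(_dex_loader))   # one sampled grasp per object, shape [4, 3200]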
# Path: Runners/TrainSDE_update.py
import isaacgym
import condexenvs
import argparse
import functools
import sys
import os
import cv2
import numpy as np
import tqdm
import time
import pickle
import random
import torch
import torch.optim as optim
from ipdb import set_trace
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
from Algorithms.SDE_update import loss_fn_cond, cond_ode_sampler, init_sde, ExponentialMovingAverage
from Networks.SDENets_update import CondScoreModel
from utils.utils import exists_or_mkdir, save_video, get_dict_key, DexDataset
#!/usr/bin/env python
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
points_per_object = 1024
vis_image = False
max_bz = 256
if __name__ == "__main__":
| parser = argparse.ArgumentParser() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ApolloAuto/apollo-model-centerpoint
# Path: paddle3d/apis/manager.py
class ComponentManager:
def __init__(self, *, name: str, description: str = ''):
def __len__(self):
def __repr__(self):
def __getitem__(self, item: str):
def components_dict(self) -> dict:
def name(self) -> str:
def description(self) -> str:
def _add_single_component(self, component: Callable):
def add_component(self, components: Union[Callable, Iterable[Callable]]
) -> Union[Callable, Iterable[Callable]]:
VOXEL_ENCODERS = ComponentManager(name="voxel_encoders")
MIDDLE_ENCODERS = ComponentManager(name="middle_encoders")
BACKBONES = ComponentManager(name="backbones")
MODELS = ComponentManager(name="models")
NECKS = ComponentManager(name="necks")
HEADS = ComponentManager(name="heads")
LOSSES = ComponentManager(name="losses")
DATASETS = ComponentManager(name="datasets")
TRANSFORMS = ComponentManager(name="transforms")
LR_SCHEDULERS = ComponentManager(name="lr_schedulers")
OPTIMIZERS = ComponentManager(name="optimizers")
VOXELIZERS = ComponentManager(name="voxelizers")
POINT_ENCODERS = ComponentManager(name="point_encoders")
POSITIONAL_ENCODING = ComponentManager(name="POSITIONAL_ENCODING")
TRANSFORMERS = ComponentManager(name="TRANSFORMERS")
TRANSFORMER_ENCODERS = ComponentManager(name="TRANSFORMER_ENCODERS")
TRANSFORMER_ENCODER_LAYERS = ComponentManager(name="TRANSFORMER_ENCODER_LAYERS")
ATTENTIONS = ComponentManager(name="ATTENTIONS")
BBOX_CODERS = ComponentManager(name="BBOX_CODERS")
BBOX_ASSIGNERS = ComponentManager(name="BBOX_ASSIGNERS")
MATCH_COSTS = ComponentManager(name="MATCH_COSTS")
BBOX_SAMPLERS = ComponentManager(name="BBOX_SAMPLERS")
TRANSFORMER_DECODER_LAYERS = ComponentManager(name="TRANSFORMER_DECODER_LAYERS")
TRANSFORMER_DECODERS = ComponentManager(name="TRANSFORMER_DECODERS")
# Path: paddle3d/geometries/bbox.py
class BBoxes3D(_Structure):
"""
"""
def __init__(self,
data: np.ndarray,
coordmode: CoordMode = 0,
velocities: List[float] = None,
origin: List[float] = [0.5, 0.5, 0.5],
rot_axis: int = 2):
if not isinstance(data, np.ndarray):
data = np.array(data)
self.coordmode = coordmode
self.velocities = velocities
self.origin = origin
self.rot_axis = rot_axis
@property
def corners_3d(self):
# corners_3d format: x0y0z0, x0y0z1, x0y1z1, x0y1z0, x1y0z0, x1y0z1, x1y1z1, x1y1z0
dx, dy, dz = self[:, 3:6].T
b = dz.shape[0]
x_corners = np.array([[0., 0., 0., 0., 1., 1., 1., 1.]],
self.dtype).repeat(
b, axis=0)
y_corners = np.array([[0., 0., 1., 1., 0., 0., 1., 1.]],
self.dtype).repeat(
b, axis=0)
z_corners = np.array([[0., 1., 1., 0., 0., 1., 1., 0.]],
self.dtype).repeat(
b, axis=0)
x_corners = (
dx[:, np.newaxis] * (x_corners - self.origin[0]))[:, :, np.newaxis]
y_corners = (
dy[:, np.newaxis] * (y_corners - self.origin[1]))[:, :, np.newaxis]
z_corners = (
dz[:, np.newaxis] * (z_corners - self.origin[2]))[:, :, np.newaxis]
corners = np.concatenate([x_corners, y_corners, z_corners], axis=-1)
angle = self[:, -1]
corners = rotation_3d_in_axis(corners, angle, axis=self.rot_axis)
centers = self[:, 0:3][:, np.newaxis, :]
corners += centers
return corners
@property
def corners_2d(self):
# corners_2d format: x0y0, x0y1, x1y1, x1y0
dx, dy = self[:, 3:5].T
b = dy.shape[0]
x_corners = np.array([[0., 0., 1., 1.]], self.dtype).repeat(b, axis=0)
y_corners = np.array([[0., 1., 1., 0.]], self.dtype).repeat(b, axis=0)
x_corners = (
dx[:, np.newaxis] * (x_corners - self.origin[0]))[:, :, np.newaxis]
y_corners = (
dy[:, np.newaxis] * (y_corners - self.origin[1]))[:, :, np.newaxis]
corners = np.concatenate([x_corners, y_corners], axis=-1)
angle = self[:, -1]
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rotation_matrix = np.array([[rot_cos, -rot_sin], [rot_sin, rot_cos]],
dtype=self.dtype)
#rotation_matrix = rotation_matrix.transpose([2, 0, 1])
#corners = corners @ rotation_matrix #TODO(luoqianhui)
corners = np.einsum("aij,jka->aik", corners, rotation_matrix)
centers = self[:, 0:2][:, np.newaxis, :]
corners += centers
return corners
def scale(self, factor: float):
"""
"""
# Scale x, y, z, w, l, h, except the orientation
self[..., :-1] = self[..., :-1] * factor
# Scale velocities
if self.velocities is not None:
self.velocities[..., :] = self.velocities[..., :] * factor
def translate(self, translation: np.ndarray):
self[..., :3] = self[..., :3] + translation
def rotate_around_z(self, angle: np.ndarray):
# Rotation matrix around the z-axis
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rotation_matrix = np.array(
[[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]],
dtype=self.dtype)
# Rotate x,y,z
self[..., :3] = self[..., :3] @ rotation_matrix
# Rotate velocities
if self.velocities is not None:
self.velocities[..., :2] = (np.hstack([
self.velocities[..., :2],
np.zeros(
(self.velocities.shape[0], 1), dtype=self.velocities.dtype)
]) @ rotation_matrix)[..., :2]
# Update orientation
self[..., -1] += angle
def horizontal_flip(self):
"""
The inputs are pixel indices
"""
self[:, 0] = -self[:, 0]
if self.velocities is not None:
self.velocities[:, 0] = -self.velocities[:, 0]
self[:,
-1] = -self[:,
-1] + 2 * np.pi # TODO(luoqianhui): CHECK THIS 2 * np.pi is needed
def horizontal_flip_coords(self):
"""
The inputs are floating point coordinates
"""
new_box3d_quat = np.stack(
[self[:, 3], -self[:, 2], -self[:, 1], self[:, 0]], 1)
self[:, :4] = new_box3d_quat
self[:, 4] = -self[:, 4]
def to_vision_based_3d_box(self):
height, width, length = self[:, 3:4], self[:, 4:5], self[:, 5:6]
x, y, z = self[:, 0:1], self[:, 1:2], self[:, 2:3]
rotation = self[:, 6]
tvec = np.concatenate([x, y - height / 2, z], axis=1)
box_pose = []
for i in range(rotation.shape[0]):
wxyz = Quaternion(
Quaternion(axis=[1, 0, 0], radians=np.pi / 2) * Quaternion(
axis=[0, 0, 1], radians=-rotation[i]))
box_pose.append(wxyz.elements.astype(np.float32))
box_pose = np.stack(box_pose, axis=0)
box3d_new = np.concatenate([box_pose, tvec, width, length, height],
axis=1)
return box3d_new
def vertical_flip(self):
self[:, 1] = -self[:, 1]
if self.velocities is not None:
self.velocities[:, 1] = -self.velocities[:, 1]
self[:, -1] = -self[:, -1] + np.pi
@staticmethod
def limit_period(val, offset: float = 0.5, period: float = np.pi):
return val - np.floor(val / period + offset) * period
def get_mask_of_bboxes_outside_range(self, point_cloud_range: np.ndarray):
bboxes_bev = self.corners_2d
# Represent the bev range as a bounding box
limit_polygons = minmax_range_3d_to_corner_2d(point_cloud_range)
mask = points_in_convex_polygon_2d(
bboxes_bev.reshape(-1, 2), limit_polygons)
return np.any(mask.reshape(-1, 4), axis=1)
def get_mask_of_small_bboxes(self, size_thr: np.ndarray):
dim = self[:, 3:6]
thr = size_thr.reshape(1, 3).repeat(self.shape[0], axis=0)
mask = np.array((dim > thr))
mask = np.all(mask, axis=1)
return mask.nonzero()
def masked_select(self, mask):
selected_data = self[mask]
selected_velocities = self.velocities
if self.velocities is not None:
selected_velocities = self.velocities[mask]
selected_bbox = BBoxes3D(selected_data, self.coordmode,
selected_velocities, self.origin,
self.rot_axis)
return selected_bbox
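# A hedged usage sketch for BBoxes3D, added for illustration only (the single box below is
# made up). Each row is [x, y, z, dx, dy, dz, yaw]; translate, rotate_around_z and scale
# update the underlying array in place, while corners_2d / corners_3d derive box corners.
import numpy as np
_example_boxes = BBoxes3D(np.array([[0.0, 0.0, 0.0, 4.0, 2.0, 1.5, 0.0]]))
_example_boxes.translate(np.array([1.0, 0.0, 0.0]))
_example_boxes.rotate_around_z(np.pi / 2)
_bev_corners = _example_boxes.corners_2d   # [num_boxes, 4, 2] corners in bird's-eye view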
# Path: paddle3d/sample.py
class Sample(_EasyDict):
"""
"""
_VALID_MODALITIES = ["image", "lidar", "radar", "multimodal", "multiview"]
def __init__(self, path: str, modality: str):
if modality not in self._VALID_MODALITIES:
raise ValueError('Only modality {} is supported, but got {}'.format(
self._VALID_MODALITIES, modality))
self.meta = SampleMeta()
self.path = path
self.data = None
self.modality = modality.lower()
self.bboxes_2d = None
self.bboxes_3d = None
self.labels = None
self.sweeps = []
self.attrs = None
# Path: paddle3d/sample.py
class SampleMeta(_EasyDict):
"""
"""
# yapf: disable
__slots__ = [
"camera_intrinsic",
# bgr or rgb
"image_format",
# pillow or cv2
"image_reader",
# chw or hwc
"channel_order",
# Unique ID of the sample
"id",
"time_lag",
"ref_from_curr"
]
# yapf: enable
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
# Path: paddle3d/utils/amp_utils.py
def dtype2float32(src_tensors):
if isinstance(src_tensors,
paddle.Tensor) and src_tensors.dtype != 'float32':
return src_tensors.astype('float32')
elif isinstance(src_tensors, Sequence):
return type(src_tensors)([dtype2float32(x) for x in src_tensors])
elif isinstance(src_tensors, Mapping):
return {key: dtype2float32(x) for key, x in src_tensors.items()}
return src_tensors
# Path: paddle3d/utils/grid.py
class GridMask(nn.Layer):
"""
This class is modified from https://github.com/fundamentalvision/BEVFormer/blob/master/projects/mmdet3d_plugin/models/utils/grid_mask.py#L70
"""
def __init__(self,
use_h,
use_w,
rotate=1,
offset=False,
ratio=0.5,
mode=0,
prob=1.):
super(GridMask, self).__init__()
self.use_h = use_h
self.use_w = use_w
self.rotate = rotate
self.offset = offset
self.ratio = ratio
self.mode = mode
self.st_prob = prob
self.prob = prob
def set_prob(self, epoch, max_epoch):
self.prob = self.st_prob * epoch / max_epoch #+ 1.#0.5
def forward(self, x):
#np.random.seed(0)
if np.random.rand() > self.prob or not self.training:
return x
n, c, h, w = x.shape
x = x.reshape([-1, h, w])
hh = int(1.5 * h)
ww = int(1.5 * w)
#np.random.seed(0)
d = np.random.randint(2, h)
self.l = min(max(int(d * self.ratio + 0.5), 1), d - 1)
mask = np.ones((hh, ww), np.float32)
#np.random.seed(0)
st_h = np.random.randint(d)
#np.random.seed(0)
st_w = np.random.randint(d)
if self.use_h:
for i in range(hh // d):
s = d * i + st_h
t = min(s + self.l, hh)
mask[s:t, :] *= 0
if self.use_w:
for i in range(ww // d):
s = d * i + st_w
t = min(s + self.l, ww)
mask[:, s:t] *= 0
#np.random.seed(0)
r = np.random.randint(self.rotate)
mask = Image.fromarray(np.uint8(mask))
mask = mask.rotate(r)
mask = np.asarray(mask)
mask = mask[(hh - h) // 2:(hh - h) // 2 +
h, (ww - w) // 2:(ww - w) // 2 + w]
mask = paddle.to_tensor(mask, dtype=x.dtype)
if self.mode == 1:
mask = 1 - mask
mask = mask.expand_as(x)
if self.offset:
#np.random.seed(0)
offset = paddle.to_tensor(
2 * (np.random.rand(h, w) - 0.5), dtype=x.dtype)
x = x * mask + offset * (1 - mask)
else:
x = x * mask
return x.reshape([n, c, h, w])
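# A hedged usage sketch for GridMask, added for illustration only (the feature-map size and
# the constructor values are placeholders). In training mode it zeroes a regular grid of
# square patches with probability prob (mode=1 keeps the bands and drops the cells); in eval
# mode, or when the random draw fails, the input is returned unchanged.
import paddle
_grid_mask = GridMask(True, True, rotate=1, offset=False, ratio=0.5, mode=1, prob=0.7)
_grid_mask.train()
_feat = paddle.rand([2, 3, 64, 64])      # [n, c, h, w]
_masked_feat = _grid_mask(_feat)         # same shape, a grid of patches zeroed out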
# Path: paddle3d/utils/logger.py
class Logger(object):
class ProgressBar(object):
def __init__(self, name: str = None):
def format(self):
def disable(self):
def enable(self):
def enabled(self) -> bool:
def __call__(self, log_level: str, msg: str):
def use_terminator(self, terminator: str):
def processing(self, msg: str, flush_interval: float = 0.1):
def _printer():
def progressbar(self, msg: str, flush_interval: float = 0.1):
def range(self, stop: int, msg: str):
def enumerate(self, iterable: Iterable, msg: str):
def __init__(self, logger: Logger, flush_interval: float = 0.1):
def update(self, progress: float):
# Path: paddle3d/models/detection/bevformer/bevformer.py
import collections
import copy
import os
import numpy as np
import paddle
import paddle.nn as nn
from typing import Dict, List
from paddle3d.apis import manager
from paddle3d.geometries import BBoxes3D
from paddle3d.sample import Sample, SampleMeta
from paddle3d.utils import dtype2float32
from paddle3d.utils.grid import GridMask
from paddle3d.utils.logger import logger
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Modified from BEVFormer (https://github.com/fundamentalvision/BEVFormer)
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
@manager.MODELS.add_component
class BEVFormer(nn.Layer):
def __init__(self,
backbone,
neck,
pts_bbox_head,
use_grid_mask=False,
pretrained=None,
video_test_mode=False):
super(BEVFormer, self).__init__()
self.grid_mask = GridMask(
| True, True, rotate=1, offset=False, ratio=0.5, mode=1, prob=0.7) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: corcel-api/cortex.t
# Path: template/protocol.py
class Embeddings( bt.Synapse):
""" A class to represent the embeddings request and response. """
texts: List[str] = pydantic.Field(
...,
title="Text",
description="The list of input texts for which embeddings are to be generated."
)
model: str = pydantic.Field(
default="text-embedding-ada-002",
title="Model",
description="The model used for generating embeddings."
)
embeddings: Optional[List[List[float]]] = pydantic.Field(
None,
title="Embeddings",
description="The resulting list of embeddings, each corresponding to an input text."
)
# Path: template/protocol.py
class ImageResponse(bt.Synapse):
""" A class to represent the response for an image-related request. """
# https://platform.stability.ai/docs/api-reference#tag/v1generation/operation/textToImage
completion: Optional[Dict] = pydantic.Field(
None,
title="Completion",
description="The completion data of the image response."
)
messages: str = pydantic.Field(
...,
title="Messages",
description="Messages related to the image response."
)
provider: str = pydantic.Field(
default="OpenAI",
title="Provider",
description="The provider to use when calling for your response."
)
seed: int = pydantic.Field(
default=1234,
title="Seed",
description="The seed that which to generate the image with"
)
samples: int = pydantic.Field(
default=1,
title="Samples",
description="The number of samples to generate"
)
cfg_scale: float = pydantic.Field(
default=8.0,
title="cfg_scale",
description="The cfg_scale to use for image generation"
)
# (Available Samplers: ddim, plms, k_euler, k_euler_ancestral, k_heun, k_dpm_2, k_dpm_2_ancestral, k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m, k_dpmpp_sde)
sampler: str = pydantic.Field(
default="",
title="Sampler",
description="The sampler to use for image generation"
)
steps: int = pydantic.Field(
default=30,
title="Seed",
description="The steps to take in generating the image"
)
model: str = pydantic.Field(
default="dall-e-2",
title="Model",
description="The model used for generating the image."
)
style: str = pydantic.Field(
default="vivid",
title="Style",
description="The style of the image."
)
size: str = pydantic.Field(
default="1024x1024",
title="The size of the image, used for Openai generation. Options are 1024x1024, 1792x1024, 1024x1792 for dalle3",
description="The size of the image."
)
height: int = pydantic.Field(
default=1024,
title="Height used for non Openai images",
description="height"
)
width: int = pydantic.Field(
default=1024,
title="Width used for non Openai images",
description="width"
)
quality: str = pydantic.Field(
default="standard",
title="Quality",
description="The quality of the image."
)
required_hash_fields: List[str] = pydantic.Field(
["messages"],
title="Required Hash Fields",
description="A list of fields required for the hash."
)
def deserialize(self) -> Optional[Dict]:
""" Deserialize the completion data of the image response. """
return self.completion
# Path: template/protocol.py
class IsAlive( bt.Synapse ):
answer: Optional[str] = None
completion: str = pydantic.Field(
"",
title="Completion",
description="Completion status of the current StreamPrompting object. "
"This attribute is mutable and can be updated.",
)
# Path: template/protocol.py
class StreamPrompting(bt.StreamingSynapse):
messages: List[Dict[str, str]] = pydantic.Field(
...,
title="Messages",
description="A list of messages in the StreamPrompting scenario, "
"each containing a role and content. Immutable.",
allow_mutation=False,
)
required_hash_fields: List[str] = pydantic.Field(
["messages"],
title="Required Hash Fields",
description="A list of required fields for the hash.",
allow_mutation=False,
)
seed: int = pydantic.Field(
default="1234",
title="Seed",
description="Seed for text generation. This attribute is immutable and cannot be updated.",
)
temperature: float = pydantic.Field(
default=0.0001,
title="Temperature",
description="Temperature for text generation. "
"This attribute is immutable and cannot be updated.",
)
max_tokens: int = pydantic.Field(
default=2048,
title="Max Tokens",
description="Max tokens for text generation. "
"This attribute is immutable and cannot be updated.",
)
top_p: float = pydantic.Field(
default=0.001,
title="Top_p",
description="Top_p for text generation. The sampler will pick one of "
"the top p percent tokens in the logit distirbution. "
"This attribute is immutable and cannot be updated.",
)
top_k: int = pydantic.Field(
default=1,
title="Top_k",
description="Top_k for text generation. Sampler will pick one of "
"the k most probablistic tokens in the logit distribtion. "
"This attribute is immutable and cannot be updated.",
)
completion: str = pydantic.Field(
None,
title="Completion",
description="Completion status of the current StreamPrompting object. "
"This attribute is mutable and can be updated.",
)
provider: str = pydantic.Field(
default="OpenAI",
title="Provider",
description="The provider to use when calling for your response."
)
model: str = pydantic.Field(
default="gpt-3.5-turbo",
title="model",
description="The model to use when calling provider for your response.",
)
async def process_streaming_response(self, response: StreamingResponse) -> AsyncIterator[str]:
if self.completion is None:
self.completion = ""
async for chunk in response.content.iter_any():
tokens = chunk.decode("utf-8")
for token in tokens:
if token:
self.completion += token
yield tokens
def deserialize(self) -> str:
return self.completion
def extract_response_json(self, response: StreamingResponse) -> dict:
headers = {
k.decode("utf-8"): v.decode("utf-8")
for k, v in response.__dict__["_raw_headers"]
}
def extract_info(prefix: str) -> dict[str, str]:
return {
key.split("_")[-1]: value
for key, value in headers.items()
if key.startswith(prefix)
}
return {
"name": headers.get("name", ""),
"timeout": float(headers.get("timeout", 0)),
"total_size": int(headers.get("total_size", 0)),
"header_size": int(headers.get("header_size", 0)),
"dendrite": extract_info("bt_header_dendrite"),
"axon": extract_info("bt_header_axon"),
"messages": self.messages,
"completion": self.completion,
}
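# A hedged usage sketch for StreamPrompting, added for illustration only (the message,
# sampling values and model name are placeholders). A validator builds a synapse like this
# and the miner's completion is streamed back token by token via process_streaming_response.
_example_synapse = StreamPrompting(
    messages=[{"role": "user", "content": "Say hello."}],
    temperature=0.01,
    max_tokens=256,
    provider="OpenAI",
    model="gpt-3.5-turbo",
)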
# Path: template/utils.py
def get_version(line_number: int = 22) -> Optional[str]:
url = "https://api.github.com/repos/corcel-api/cortex.t/contents/template/__init__.py"
response = requests.get(url, timeout=10)
if not response.ok:
bt.logging.error("github api call failed")
return None
content = response.json()['content']
decoded_content = base64.b64decode(content).decode('utf-8')
lines = decoded_content.split('\n')
if line_number > len(lines):
raise Exception("Line number exceeds file length")
version_line = lines[line_number - 1]
version_match = re.search(r'__version__ = "(.*?)"', version_line)
if not version_match:
raise Exception("Version information not found in the specified line")
return version_match.group(1)
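# A hedged usage sketch for get_version, added for illustration only. It fetches
# template/__init__.py from the corcel-api/cortex.t repository via the GitHub API and pulls
# the __version__ string out of the requested line, returning None if the API call fails.
_remote_version = get_version()
if _remote_version is not None:
    print(f"remote template version: {_remote_version}")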
# Path: miner/claude_miner.py
import base # noqa
import argparse
import asyncio
import copy
import json
import os
import io
import base64
import boto3
import pathlib
import threading
import time
import requests
import traceback
import requests
import anthropic
import bittensor as bt
import wandb
import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation
import template
import sys
from abc import ABC, abstractmethod
from collections import deque
from functools import partial
from typing import Tuple
from stability_sdk import client
from config import check_config, get_config
from openai import AsyncOpenAI, OpenAI
from PIL import Image
from anthropic_bedrock import AsyncAnthropicBedrock, HUMAN_PROMPT, AI_PROMPT, AnthropicBedrock
from template.protocol import Embeddings, ImageResponse, IsAlive, StreamPrompting
from template.utils import get_version
from starlette.types import Send
f"Serving axon {StreamPrompting} "
f"on network: {self.config.subtensor.chain_endpoint} "
f"with netuid: {self.config.netuid}"
)
self.axon.serve(netuid=self.config.netuid, subtensor=self.subtensor)
bt.logging.info(f"Starting axon server on port: {self.config.axon.port}")
self.axon.start()
self.last_epoch_block = self.subtensor.get_current_block()
bt.logging.info(f"Miner starting at block: {self.last_epoch_block}")
bt.logging.info("Starting main loop")
step = 0
try:
while not self.should_exit:
_start_epoch = time.time()
# --- Wait until next epoch.
current_block = self.subtensor.get_current_block()
while (
current_block - self.last_epoch_block
< self.config.miner.blocks_per_epoch
):
                    # --- Wait for the next block.
time.sleep(1)
current_block = self.subtensor.get_current_block()
# --- Check if we should exit.
if self.should_exit:
break
# --- Update the metagraph with the latest network state.
self.last_epoch_block = self.subtensor.get_current_block()
metagraph = self.subtensor.metagraph(
netuid=self.config.netuid,
lite=True,
block=self.last_epoch_block,
)
log = (
f"Step:{step} | "
f"Block:{metagraph.block.item()} | "
f"Stake:{metagraph.S[self.my_subnet_uid]} | "
f"Rank:{metagraph.R[self.my_subnet_uid]} | "
f"Trust:{metagraph.T[self.my_subnet_uid]} | "
f"Consensus:{metagraph.C[self.my_subnet_uid] } | "
f"Incentive:{metagraph.I[self.my_subnet_uid]} | "
f"Emission:{metagraph.E[self.my_subnet_uid]}"
)
bt.logging.info(log)
# --- Set weights.
if not self.config.miner.no_set_weights:
pass
step += 1
except KeyboardInterrupt:
self.axon.stop()
bt.logging.success("Miner killed by keyboard interrupt.")
sys.exit()
except Exception:
bt.logging.error(traceback.format_exc())
def run_in_background_thread(self) -> None:
if not self.is_running:
bt.logging.debug("Starting miner in background thread.")
self.should_exit = False
self.thread = threading.Thread(target=self.run, daemon=True)
self.thread.start()
self.is_running = True
bt.logging.debug("Started")
def stop_run_thread(self) -> None:
if self.is_running:
bt.logging.debug("Stopping miner in background thread.")
self.should_exit = True
self.thread.join(5)
self.is_running = False
bt.logging.debug("Stopped")
def __enter__(self):
self.run_in_background_thread()
def __exit__(self, exc_type, exc_value, traceback):
self.stop_run_thread()
class StreamingTemplateMiner(StreamMiner):
def config(self) -> bt.config:
parser = argparse.ArgumentParser(description="Streaming Miner Configs")
self.add_args(parser)
return bt.config(parser)
def add_args(cls, parser: argparse.ArgumentParser):
pass
async def embeddings(self, synapse: Embeddings) -> Embeddings:
bt.logging.info(f"entered embeddings processing for embeddings of len {len(synapse.texts)}")
async def get_embeddings_in_batch(texts, model, batch_size=10):
batches = [texts[i:i + batch_size] for i in range(0, len(texts), batch_size)]
tasks = []
for batch in batches:
filtered_batch = [text for text in batch if text.strip()]
if filtered_batch:
task = asyncio.create_task(client.embeddings.create(
input=filtered_batch, model=model, encoding_format='float'
))
tasks.append(task)
else:
bt.logging.info("Skipped an empty batch.")
all_embeddings = []
results = await asyncio.gather(*tasks, return_exceptions=True)
for result in results:
if isinstance(result, Exception):
bt.logging.error(f"Error in processing batch: {result}")
else:
batch_embeddings = [item.embedding for item in result.data]
all_embeddings.extend(batch_embeddings)
return all_embeddings
| try: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ljy0ustc/LLaRA
# Path: model/model_interface.py
class MInterface(pl.LightningModule):
def __init__(self,
**kargs):
super().__init__()
self.save_hyperparameters()
self.load_llm(self.hparams.llm_path)
self.load_rec_model(self.hparams.rec_model_path)
self.load_projector()
def forward(self, batch):
targets = batch["tokens"].input_ids.masked_fill(
batch["tokens"].input_ids == self.llama_tokenizer.pad_token_id, -100
) # [batch_size, max_len]
targets = targets.masked_fill((batch["tokens"].token_type_ids == 0)[:,1:], -100)
input_embeds = self.wrap_emb(batch)
outputs = self.llama_model(
inputs_embeds=input_embeds,
attention_mask=batch["tokens"].attention_mask,
return_dict=True,
labels=targets,
use_cache=False
)
return outputs
def generate(self, batch,temperature=0.8,do_sample=False,num_beams=1,max_gen_length=64,min_gen_length=1,repetition_penalty=1.0,length_penalty=1.0, num_return_sequences=1):
input_embeds = self.wrap_emb(batch)
generate_ids = self.llama_model.generate(
inputs_embeds=input_embeds,
attention_mask=batch["tokens"].attention_mask,
temperature=temperature,
do_sample=do_sample,
num_beams=num_beams,
max_new_tokens=max_gen_length,
min_new_tokens=min_gen_length,
pad_token_id=self.llama_tokenizer.pad_token_id,
repetition_penalty=repetition_penalty,
length_penalty=length_penalty,
num_return_sequences=num_return_sequences
)
output_text=self.llama_tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
outputs=[text.strip() for text in output_text]
return outputs
def training_step(self, batch, batch_idx):
if self.scheduler:
self.scheduler.step(self.trainer.global_step, self.current_epoch, self.trainer.max_steps)
if batch["flag"]:
for name, param in self.projector.named_parameters():
param.requires_grad = False
else:
for name, param in self.projector.named_parameters():
param.requires_grad = True
out = self(batch)
loss = self.configure_loss(out)
self.log('loss', loss, on_step=True, on_epoch=True, prog_bar=True)
self.log('lr', self.scheduler.optimizer.param_groups[0]['lr'], on_step=True, on_epoch=True, prog_bar=True)
self.log('global_step_num', self.trainer.global_step, on_step=True, on_epoch=True, prog_bar=True)
return loss
def on_validation_epoch_start(self):
self.val_content={
"generate":[],
"real":[],
"cans":[],
}
@torch.no_grad()
def validation_step(self, batch, batch_idx):
generate_output = self.generate(batch)
output=[]
for i,generate in enumerate(generate_output):
real=batch['correct_answer'][i]
cans=batch['cans_name'][i]
generate=generate.strip().split("\n")[0]
output.append((generate,real,cans))
return output
def on_validation_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
for generate,real,cans in outputs:
self.val_content["generate"].append(generate)
self.val_content["real"].append(real)
self.val_content["cans"].append(cans)
def on_validation_epoch_end(self):
df=DataFrame(self.val_content)
if not os.path.exists(self.hparams.output_dir):
os.makedirs(self.hparams.output_dir)
df.to_csv(op.join(self.hparams.output_dir, 'valid.csv'))
prediction_valid_ratio,hr=self.calculate_hr1(self.val_content)
metric=hr*prediction_valid_ratio
self.log('val_prediction_valid', prediction_valid_ratio, on_step=False, on_epoch=True, prog_bar=True)
self.log('val_hr', hr, on_step=False, on_epoch=True, prog_bar=True)
self.log('metric', metric, on_step=False, on_epoch=True, prog_bar=True)
def on_test_epoch_start(self):
self.test_content={
"generate":[],
"real":[],
"cans":[],
}
@torch.no_grad()
def test_step(self, batch, batch_idx):
generate_output = self.generate(batch)
output=[]
for i,generate in enumerate(generate_output):
real=batch['correct_answer'][i]
cans=batch['cans_name'][i]
generate=generate.strip().split("\n")[0]
output.append((generate,real,cans))
return output
def on_test_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
for generate,real,cans in outputs:
self.test_content["generate"].append(generate)
self.test_content["real"].append(real)
self.test_content["cans"].append(cans)
def on_test_epoch_end(self):
df=DataFrame(self.test_content)
if not os.path.exists(self.hparams.output_dir):
os.makedirs(self.hparams.output_dir)
df.to_csv(op.join(self.hparams.output_dir, 'test.csv'))
prediction_valid_ratio,hr=self.calculate_hr1(self.test_content)
metric=hr*prediction_valid_ratio
self.log('test_prediction_valid', prediction_valid_ratio, on_step=False, on_epoch=True, prog_bar=True)
self.log('test_hr', hr, on_step=False, on_epoch=True, prog_bar=True)
self.log('metric', metric, on_step=False, on_epoch=True, prog_bar=True)
def configure_optimizers(self):
if hasattr(self.hparams, 'weight_decay'):
weight_decay = self.hparams.weight_decay
else:
weight_decay = 0
optimizer = torch.optim.Adam([
{'params': self.projector.parameters(), 'lr': self.hparams.lr, 'weight_decay':weight_decay},
{'params': self.llama_model.parameters(), 'lr': self.hparams.lr}
])
if self.hparams.lr_scheduler is None:
return optimizer
else:
max_step = self.trainer.max_steps
warmup_steps = max_step // 20
print(f'max_step: {max_step}')
print(f'warmup_steps: {warmup_steps}')
if self.hparams.lr_scheduler == 'cosine':
self.scheduler = LinearWarmupCosineLRScheduler(optimizer,
max_step=max_step,
min_lr=self.hparams.lr_decay_min_lr,
init_lr=self.hparams.lr,
warmup_steps=warmup_steps,
warmup_start_lr=self.hparams.lr_warmup_start_lr)
else:
self.scheduler = None
raise ValueError('Invalid lr_scheduler type!')
return optimizer
def configure_loss(self, out, labels=None):
loss = self.hparams.loss.lower()
if loss == 'lm':
return out.loss
else:
raise ValueError("Invalid Loss Type!")
def on_save_checkpoint(self, checkpoint):
if self.hparams.save == 'part':
checkpoint.pop('optimizer_states')
to_be_removed = []
for key, value in checkpoint['state_dict'].items():
try:
if not self.get_parameter(key).requires_grad:
to_be_removed.append(key)
except AttributeError:
to_be_removed.append(key)
for key in to_be_removed:
checkpoint['state_dict'].pop(key)
elif self.hparams.save == 'all':
pass
def load_llm(self, llm_path):
print('Loading LLAMA')
self.llama_tokenizer = LlamaTokenizer.from_pretrained(llm_path, use_fast=False)
self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token
self.llama_tokenizer.add_special_tokens({'pad_token': '[PAD]'})
self.llama_tokenizer.padding_side = "right"
self.llama_tokenizer.add_special_tokens({'additional_special_tokens': ['[PH]','[HistoryEmb]','[CansEmb]','[ItemEmb]']})
self.llama_model = LlamaForCausalLM.from_pretrained(llm_path, torch_dtype=torch.bfloat16)
self.llama_model.resize_token_embeddings(len(self.llama_tokenizer))
if self.hparams.llm_tuning == 'lora':
if self.hparams.peft_dir:
self.llama_model = PeftModel.from_pretrained(self.llm_model, self.hparams.peft_dir, is_trainable=True)
else:
if self.hparams.peft_config:
peft_config = LoraConfig(**LoraConfig.from_json_file(self.hparams.peft_config))
else:
peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM,
inference_mode=False,
r=self.hparams.lora_r,
lora_alpha=self.hparams.lora_alpha,
lora_dropout=self.hparams.lora_dropout,
target_modules=['k_proj', 'v_proj', 'q_proj', 'o_proj', 'gate_proj', 'up_proj', 'down_proj'])
self.peft_config = peft_config
self.llama_model = get_peft_model(self.llama_model, peft_config)
self.llama_model.print_trainable_parameters()
elif self.hparams.llm_tuning == 'freeze':
for name, param in self.llama_model.named_parameters():
param.requires_grad = False
elif self.hparams.llm_tuning == 'freeze_lora':
if self.hparams.peft_dir:
self.llama_model = PeftModel.from_pretrained(self.llm_model, self.hparams.peft_dir, is_trainable=True)
else:
if self.hparams.peft_config:
peft_config = LoraConfig(**LoraConfig.from_json_file(self.hparams.peft_config))
else:
peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM,
inference_mode=False,
r=self.hparams.lora_r,
lora_alpha=self.hparams.lora_alpha,
lora_dropout=self.hparams.lora_dropout,
target_modules=['k_proj', 'v_proj', 'q_proj', 'o_proj', 'gate_proj', 'up_proj', 'down_proj'])
self.peft_config = peft_config
self.llama_model = get_peft_model(self.llama_model, peft_config)
for name, param in self.llama_model.named_parameters():
param.requires_grad = False
self.llama_model.print_trainable_parameters()
else:
raise NotImplementedError()
print('Loading LLAMA Done')
def load_projector(self):
name = self.hparams.model_name
camel_name = ''.join([i.capitalize() for i in name.split('_')])
try:
Model = getattr(importlib.import_module(
'.'+name, package=__package__), camel_name)
except:
raise ValueError(
f'Invalid Module File Name or Invalid Class Name {name}.{camel_name}!')
self.projector = self.instancialize(Model, rec_size=self.hparams.rec_size, llm_size=self.llama_model.config.hidden_size)
def instancialize(self, Model, **other_args):
class_args = inspect.getargspec(Model.__init__).args[1:]
inkeys = self.hparams.keys()
args1 = {}
for arg in class_args:
if arg in inkeys:
args1[arg] = getattr(self.hparams, arg)
args1.update(other_args)
return Model(**args1)
def load_rec_model(self, rec_model_path):
print('Loading Rec Model')
self.rec_model = torch.load(rec_model_path, map_location="cpu")
self.rec_model.eval()
for name, param in self.rec_model.named_parameters():
param.requires_grad = False
        print('Loading Rec model Done')
def encode_items(self, seq):
if self.hparams.rec_embed=="SASRec":
item_rec_embs=self.rec_model.cacu_x(seq)
elif self.hparams.rec_embed in ['Caser','GRU']:
item_rec_embs=self.rec_model.item_embeddings(seq)
item_txt_embs=self.projector(item_rec_embs)
return item_txt_embs
def embed_tokens(self, token_ids):
embeds = self.llama_model.base_model.embed_tokens(token_ids)
return embeds
def wrap_emb(self, batch):
input_embeds = self.llama_model.get_input_embeddings()(batch["tokens"].input_ids)
his_token_id=self.llama_tokenizer("[HistoryEmb]", return_tensors="pt",add_special_tokens=False).input_ids.item()
cans_token_id=self.llama_tokenizer("[CansEmb]", return_tensors="pt",add_special_tokens=False).input_ids.item()
item_token_id=self.llama_tokenizer("[ItemEmb]", return_tensors="pt",add_special_tokens=False).input_ids.item()
his_item_embeds= self.encode_items(batch["seq"])
cans_item_embeds= self.encode_items(batch["cans"])
item_embeds=self.encode_items(batch["item_id"])
for i in range(len(batch["len_seq"])):
if (batch["tokens"].input_ids[i]==his_token_id).nonzero().shape[0]>0:
idx_tensor=(batch["tokens"].input_ids[i]==his_token_id).nonzero().view(-1)
for idx, item_emb in zip(idx_tensor,his_item_embeds[i,:batch["len_seq"][i].item()]):
input_embeds[i,idx]=item_emb
if (batch["tokens"].input_ids[i]==cans_token_id).nonzero().shape[0]>0:
idx_tensor=(batch["tokens"].input_ids[i]==cans_token_id).nonzero().view(-1)
for idx, item_emb in zip(idx_tensor,cans_item_embeds[i,:batch["len_cans"][i].item()]):
input_embeds[i,idx]=item_emb
if (batch["tokens"].input_ids[i]==item_token_id).nonzero().shape[0]>0:
idx=(batch["tokens"].input_ids[i]==item_token_id).nonzero().item()
input_embeds[i,idx]=item_embeds[i]
return input_embeds
def calculate_hr1(self,eval_content):
correct_num=0
valid_num=0
total_num=0
for i,generate in enumerate(eval_content["generate"]):
real=eval_content["real"][i]
cans=eval_content["cans"][i]
total_num+=1
generate=generate.strip().lower().strip()
real=real.strip().lower().strip()
cans=[item.strip().lower().strip() for item in cans]
gen_cans_list=[]
for cans_item in cans:
if cans_item in generate:
gen_cans_list.append(cans_item)
if len(gen_cans_list)==1:
valid_num+=1
if real == gen_cans_list[0]:
correct_num+=1
valid_ratio=valid_num/total_num
if valid_num>0:
hr1=correct_num/valid_num
else:
hr1=0
return valid_ratio,hr1
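# A hedged worked example for calculate_hr1, added for illustration only (the three fake
# records are made up). A generation counts as valid when exactly one candidate title occurs
# in it, and HR@1 is computed over the valid subset: below, records 1 and 3 are valid and
# only record 1 is correct, so the method returns (valid_ratio, hr1) = (2/3, 1/2).
_fake_eval_content = {
    "generate": ["I recommend Titanic", "maybe Titanic or Avatar", "Avatar"],
    "real": ["Titanic", "Avatar", "Titanic"],
    "cans": [["Titanic", "Avatar"], ["Titanic", "Avatar"], ["Titanic", "Avatar"]],
}
# valid_ratio, hr1 = self.calculate_hr1(_fake_eval_content)   # -> (0.666..., 0.5)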
# Path: data/data_interface.py
class DInterface(pl.LightningDataModule):
def __init__(self,
llm_tokenizer=None,
num_workers=8,
dataset='',
**kwargs):
super().__init__()
self.num_workers = num_workers
self.llm_tokenizer=llm_tokenizer
self.dataset = dataset
self.kwargs = kwargs
self.batch_size = kwargs['batch_size']
self.max_epochs = kwargs['max_epochs']
self.load_data_module()
self.load_prompt(kwargs['prompt_path'])
self.trainset = self.instancialize(stage='train')
self.valset = self.instancialize(stage='val')
self.testset = self.instancialize(stage='test')
self.max_steps = self.max_epochs*(len(self.trainset)//self.batch_size)//self.num_workers
def train_dataloader(self):
return DataLoader(self.trainset,
batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=True,
drop_last=True,
collate_fn=TrainCollater(prompt_list=self.prompt_list,llm_tokenizer=self.llm_tokenizer,train=True, max_step=self.max_steps))
def val_dataloader(self):
return DataLoader(self.valset,
batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=False,
collate_fn=TrainCollater(prompt_list=self.prompt_list,llm_tokenizer=self.llm_tokenizer,train=False))
def test_dataloader(self):
return DataLoader(self.testset,
batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=False,
collate_fn=TrainCollater(prompt_list=self.prompt_list,llm_tokenizer=self.llm_tokenizer,train=False))
def load_data_module(self):
name = self.dataset
camel_name = ''.join([i.capitalize() for i in name.split('_')])
try:
self.data_module = getattr(importlib.import_module(
'.'+name, package=__package__), camel_name)
except:
raise ValueError(
f'Invalid Dataset File Name or Invalid Class Name data.{name}.{camel_name}')
def instancialize(self, **other_args):
""" Instancialize a model using the corresponding parameters
from self.hparams dictionary. You can also input any args
to overwrite the corresponding value in self.kwargs.
"""
class_args = inspect.getargspec(self.data_module.__init__).args[1:]
inkeys = self.kwargs.keys()
args1 = {}
for arg in class_args:
if arg in inkeys:
args1[arg] = self.kwargs[arg]
args1.update(other_args)
return self.data_module(**args1)
def load_prompt(self,prompt_path):
if os.path.isfile(prompt_path):
with open(prompt_path, 'r') as f:
raw_prompts = f.read().splitlines()
self.prompt_list = [p.strip() for p in raw_prompts]
print('Load {} training prompts'.format(len(self.prompt_list)))
print('Prompt Example \n{}'.format(random.choice(self.prompt_list)))
else:
self.prompt_list = []
# Path: recommender/A_SASRec_final_bce_llm.py
class SASRec(nn.Module):
def __init__(self, hidden_size, item_num, state_size, dropout, device, num_heads=1):
super().__init__()
self.state_size = state_size
self.hidden_size = hidden_size
self.item_num = int(item_num)
self.dropout = nn.Dropout(dropout)
self.device = device
self.item_embeddings = nn.Embedding(
num_embeddings=item_num + 1,
embedding_dim=hidden_size,
)
nn.init.normal_(self.item_embeddings.weight, 0, 1)
self.positional_embeddings = nn.Embedding(
num_embeddings=state_size,
embedding_dim=hidden_size
)
self.emb_dropout = nn.Dropout(dropout)
self.ln_1 = nn.LayerNorm(hidden_size)
self.ln_2 = nn.LayerNorm(hidden_size)
self.ln_3 = nn.LayerNorm(hidden_size)
self.mh_attn = MultiHeadAttention(hidden_size, hidden_size, num_heads, dropout)
self.feed_forward = PositionwiseFeedForward(hidden_size, hidden_size, dropout)
self.s_fc = nn.Linear(hidden_size, item_num)
def forward(self, states, len_states):
inputs_emb = self.item_embeddings(states)
inputs_emb += self.positional_embeddings(torch.arange(self.state_size).to(self.device))
seq = self.emb_dropout(inputs_emb)
mask = torch.ne(states, self.item_num).float().unsqueeze(-1).to(self.device)
seq *= mask
seq_normalized = self.ln_1(seq)
mh_attn_out = self.mh_attn(seq_normalized, seq)
ff_out = self.feed_forward(self.ln_2(mh_attn_out))
ff_out *= mask
ff_out = self.ln_3(ff_out)
state_hidden = extract_axis_1(ff_out, len_states - 1)
supervised_output = self.s_fc(state_hidden).squeeze()
return supervised_output
def forward_eval(self, states, len_states):
inputs_emb = self.item_embeddings(states)
inputs_emb += self.positional_embeddings(torch.arange(self.state_size).to(self.device))
seq = self.emb_dropout(inputs_emb)
mask = torch.ne(states, self.item_num).float().unsqueeze(-1).to(self.device)
seq *= mask
seq_normalized = self.ln_1(seq)
mh_attn_out = self.mh_attn(seq_normalized, seq)
ff_out = self.feed_forward(self.ln_2(mh_attn_out))
ff_out *= mask
ff_out = self.ln_3(ff_out)
state_hidden = extract_axis_1(ff_out, len_states - 1)
supervised_output = self.s_fc(state_hidden).squeeze()
return supervised_output
def cacul_h(self, states, len_states):
inputs_emb = self.item_embeddings(states)
inputs_emb += self.positional_embeddings(torch.arange(self.state_size).to(self.device))
seq = self.emb_dropout(inputs_emb)
mask = torch.ne(states, self.item_num).float().unsqueeze(-1).to(self.device)
seq *= mask
seq_normalized = self.ln_1(seq)
mh_attn_out = self.mh_attn(seq_normalized, seq)
ff_out = self.feed_forward(self.ln_2(mh_attn_out))
ff_out *= mask
ff_out = self.ln_3(ff_out)
state_hidden = extract_axis_1(ff_out, len_states - 1)
return state_hidden
def cacu_x(self, x):
x = self.item_embeddings(x)
return x
# Path: recommender/A_SASRec_final_bce_llm.py
class Caser(nn.Module):
def __init__(self, hidden_size, item_num, state_size, num_filters, filter_sizes,
dropout_rate):
super(Caser, self).__init__()
self.hidden_size = hidden_size
self.item_num = int(item_num)
self.state_size = state_size
self.filter_sizes = eval(filter_sizes)
self.num_filters = num_filters
self.dropout_rate = dropout_rate
self.item_embeddings = nn.Embedding(
num_embeddings=item_num + 1,
embedding_dim=self.hidden_size,
)
# init embedding
nn.init.normal_(self.item_embeddings.weight, 0, 0.01)
# Horizontal Convolutional Layers
self.horizontal_cnn = nn.ModuleList(
[nn.Conv2d(1, self.num_filters, (i, self.hidden_size)) for i in self.filter_sizes])
# Initialize weights and biases
for cnn in self.horizontal_cnn:
nn.init.xavier_normal_(cnn.weight)
nn.init.constant_(cnn.bias, 0.1)
# Vertical Convolutional Layer
self.vertical_cnn = nn.Conv2d(1, 1, (self.state_size, 1))
nn.init.xavier_normal_(self.vertical_cnn.weight)
nn.init.constant_(self.vertical_cnn.bias, 0.1)
# Fully Connected Layer
self.num_filters_total = self.num_filters * len(self.filter_sizes)
final_dim = self.hidden_size + self.num_filters_total
self.s_fc = nn.Linear(final_dim, item_num)
# dropout
self.dropout = nn.Dropout(self.dropout_rate)
def forward(self, states, len_states):
input_emb = self.item_embeddings(states)
mask = torch.ne(states, self.item_num).float().unsqueeze(-1)
input_emb *= mask
input_emb = input_emb.unsqueeze(1)
pooled_outputs = []
for cnn in self.horizontal_cnn:
h_out = nn.functional.relu(cnn(input_emb))
h_out = h_out.squeeze()
p_out = nn.functional.max_pool1d(h_out, h_out.shape[2])
pooled_outputs.append(p_out)
h_pool = torch.cat(pooled_outputs, 1)
h_pool_flat = h_pool.view(-1, self.num_filters_total)
v_out = nn.functional.relu(self.vertical_cnn(input_emb))
v_flat = v_out.view(-1, self.hidden_size)
out = torch.cat([h_pool_flat, v_flat], 1)
out = self.dropout(out)
supervised_output = self.s_fc(out)
return supervised_output
def forward_eval(self, states, len_states):
input_emb = self.item_embeddings(states)
mask = torch.ne(states, self.item_num).float().unsqueeze(-1)
input_emb *= mask
input_emb = input_emb.unsqueeze(1)
pooled_outputs = []
for cnn in self.horizontal_cnn:
h_out = nn.functional.relu(cnn(input_emb))
h_out = h_out.squeeze()
p_out = nn.functional.max_pool1d(h_out, h_out.shape[2])
pooled_outputs.append(p_out)
h_pool = torch.cat(pooled_outputs, 1)
h_pool_flat = h_pool.view(-1, self.num_filters_total)
v_out = nn.functional.relu(self.vertical_cnn(input_emb))
v_flat = v_out.view(-1, self.hidden_size)
out = torch.cat([h_pool_flat, v_flat], 1)
out = self.dropout(out)
supervised_output = self.s_fc(out)
return supervised_output
# Path: recommender/A_SASRec_final_bce_llm.py
class GRU(nn.Module):
def __init__(self, hidden_size, item_num, state_size, gru_layers=1):
super(GRU, self).__init__()
self.hidden_size = hidden_size
self.item_num = item_num
self.state_size = state_size
self.item_embeddings = nn.Embedding(
num_embeddings=item_num + 1,
embedding_dim=self.hidden_size,
)
nn.init.normal_(self.item_embeddings.weight, 0, 0.01)
self.gru = nn.GRU(
input_size=self.hidden_size,
hidden_size=self.hidden_size,
num_layers=gru_layers,
batch_first=True
)
self.s_fc = nn.Linear(self.hidden_size, self.item_num)
def forward(self, states, len_states):
# Supervised Head
emb = self.item_embeddings(states)
emb_packed = torch.nn.utils.rnn.pack_padded_sequence(emb, len_states, batch_first=True, enforce_sorted=False)
emb_packed, hidden = self.gru(emb_packed)
hidden = hidden.view(-1, hidden.shape[2])
supervised_output = self.s_fc(hidden)
return supervised_output
def forward_eval(self, states, len_states):
# Supervised Head
emb = self.item_embeddings(states)
emb_packed = torch.nn.utils.rnn.pack_padded_sequence(emb, len_states, batch_first=True, enforce_sorted=False)
emb_packed, hidden = self.gru(emb_packed)
hidden = hidden.view(-1, hidden.shape[2])
supervised_output = self.s_fc(hidden)
return supervised_output
# Path: main.py
import os
import pytorch_lightning as pl
import pytorch_lightning.callbacks as plc
from argparse import ArgumentParser
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger, CSVLogger
from model.model_interface import MInterface
from data.data_interface import DInterface
from recommender.A_SASRec_final_bce_llm import SASRec, Caser, GRU
from SASRecModules_ori import *
from transformers import LlamaForCausalLM, LlamaTokenizer
def load_callbacks(args):
callbacks = []
callbacks.append(plc.EarlyStopping(
monitor='metric',
mode='max',
patience=10,
min_delta=0.001
))
callbacks.append(plc.ModelCheckpoint(
monitor='metric',
dirpath=args.ckpt_dir,
filename='{epoch:02d}-{metric:.3f}',
save_top_k=-1,
mode='max',
save_last=True,
#train_time_interval=args.val_check_interval
every_n_epochs=1
))
if args.lr_scheduler:
callbacks.append(plc.LearningRateMonitor(
logging_interval='step'))
return callbacks
def main(args):
pl.seed_everything(args.seed)
model = MInterface(**vars(args))
if args.ckpt_path:
ckpt = torch.load(args.ckpt_path, map_location='cpu')
model.load_state_dict(ckpt['state_dict'], strict=False)
print("load checkpoints from {}".format(args.ckpt_path))
data_module = DInterface(llm_tokenizer=model.llama_tokenizer,**vars(args))
args.max_steps=len(data_module.trainset) * args.max_epochs // (args.accumulate_grad_batches * args.batch_size)
logger = TensorBoardLogger(save_dir='./log/', name=args.log_dir)
args.callbacks = load_callbacks(args)
args.logger = logger
if not os.path.exists(args.ckpt_dir):
os.makedirs(args.ckpt_dir)
trainer = Trainer.from_argparse_args(args)
if args.auto_lr_find:
lr_finder=trainer.tuner.lr_find(model=model, datamodule=data_module, min_lr=1e-10, max_lr=1e-3, num_training=100)
fig=lr_finder.plot(suggest=True)
fig_path="lr_finder.png"
fig.savefig(fig_path)
print("Saving to {}".format(fig_path))
model.hparams.lr=lr_finder.suggestion()
if args.mode == 'train':
trainer.fit(model=model, datamodule=data_module)
else:
| trainer.test(model=model, datamodule=data_module) |
====REPOSITORY====
# Repo Name: silicx/GoldFromOres
# Path: DatasetCondensation/utils.py
def get_loops(ipc):
# Get the two hyper-parameters of outer-loop and inner-loop.
# The following values are empirically good.
if ipc == 1:
outer_loop, inner_loop = 1, 1
elif ipc == 10:
outer_loop, inner_loop = 10, 50
elif ipc == 20:
outer_loop, inner_loop = 20, 25
elif ipc == 30:
outer_loop, inner_loop = 30, 20
elif ipc == 40:
outer_loop, inner_loop = 40, 15
elif ipc == 50:
outer_loop, inner_loop = 50, 10
else:
outer_loop, inner_loop = 0, 0
exit('loop hyper-parameters are not defined for %d ipc'%ipc)
return outer_loop, inner_loop
# Path: DatasetCondensation/utils.py
def get_dataset(dataset, data_path):
if dataset == 'MNIST':
channel = 1
im_size = (28, 28)
num_classes = 10
mean = [0.1307]
std = [0.3081]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.MNIST(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.MNIST(data_path, train=False, download=True, transform=transform)
class_names = [str(c) for c in range(num_classes)]
elif dataset == 'FashionMNIST':
channel = 1
im_size = (28, 28)
num_classes = 10
mean = [0.2861]
std = [0.3530]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.FashionMNIST(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.FashionMNIST(data_path, train=False, download=True, transform=transform)
class_names = dst_train.classes
elif dataset == 'SVHN':
channel = 3
im_size = (32, 32)
num_classes = 10
mean = [0.4377, 0.4438, 0.4728]
std = [0.1980, 0.2010, 0.1970]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.SVHN(data_path, split='train', download=True, transform=transform) # no augmentation
dst_test = datasets.SVHN(data_path, split='test', download=True, transform=transform)
class_names = [str(c) for c in range(num_classes)]
elif dataset == 'CIFAR10':
channel = 3
im_size = (32, 32)
num_classes = 10
mean = [0.4914, 0.4822, 0.4465]
std = [0.2023, 0.1994, 0.2010]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.CIFAR10(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.CIFAR10(data_path, train=False, download=True, transform=transform)
class_names = dst_train.classes
elif dataset == 'CIFAR100':
channel = 3
im_size = (32, 32)
num_classes = 100
mean = [0.5071, 0.4866, 0.4409]
std = [0.2673, 0.2564, 0.2762]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.CIFAR100(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.CIFAR100(data_path, train=False, download=True, transform=transform)
class_names = dst_train.classes
elif dataset == 'TinyImageNet':
channel = 3
im_size = (64, 64)
num_classes = 200
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
data = torch.load(os.path.join(data_path, 'tinyimagenet.pt'), map_location='cpu')
class_names = data['classes']
images_train = data['images_train']
labels_train = data['labels_train']
images_train = images_train.detach().float() / 255.0
labels_train = labels_train.detach()
for c in range(channel):
images_train[:,c] = (images_train[:,c] - mean[c])/std[c]
dst_train = TensorDataset(images_train, labels_train) # no augmentation
images_val = data['images_val']
labels_val = data['labels_val']
images_val = images_val.detach().float() / 255.0
labels_val = labels_val.detach()
for c in range(channel):
images_val[:, c] = (images_val[:, c] - mean[c]) / std[c]
dst_test = TensorDataset(images_val, labels_val) # no augmentation
else:
exit('unknown dataset: %s'%dataset)
testloader = torch.utils.data.DataLoader(dst_test, batch_size=256, shuffle=False, num_workers=0)
return channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader
# Path: DatasetCondensation/utils.py
def get_network(model, channel, num_classes, im_size=(32, 32)):
torch.random.manual_seed(int(time.time() * 1000) % 100000)
net_width, net_depth, net_act, net_norm, net_pooling = get_default_convnet_setting()
if model == 'MLP':
net = MLP(channel=channel, num_classes=num_classes)
elif model == 'ConvNet':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
elif model == 'LeNet':
net = LeNet(channel=channel, num_classes=num_classes)
elif model == 'AlexNet':
net = AlexNet(channel=channel, num_classes=num_classes)
elif model == 'AlexNetBN':
net = AlexNetBN(channel=channel, num_classes=num_classes)
elif model == 'VGG11':
net = VGG11( channel=channel, num_classes=num_classes)
elif model == 'VGG11BN':
net = VGG11BN(channel=channel, num_classes=num_classes)
elif model == 'ResNet18':
net = ResNet18(channel=channel, num_classes=num_classes)
elif model == 'ResNet18BN_AP':
net = ResNet18BN_AP(channel=channel, num_classes=num_classes)
elif model == 'ResNet18BN':
net = ResNet18BN(channel=channel, num_classes=num_classes)
elif model == 'ConvNetD1':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=1, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetD2':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=2, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetD3':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=3, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetD4':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=4, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetW32':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=32, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetW64':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=64, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetW128':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=128, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetW256':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=256, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetAS':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='sigmoid', net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetAR':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='relu', net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetAL':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='leakyrelu', net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetASwish':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='swish', net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetASwishBN':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='swish', net_norm='batchnorm', net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetNN':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='none', net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetBN':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='batchnorm', net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetLN':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='layernorm', net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetIN':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='instancenorm', net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetGN':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='groupnorm', net_pooling=net_pooling, im_size=im_size)
elif model == 'ConvNetNP':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling='none', im_size=im_size)
elif model == 'ConvNetMP':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling='maxpooling', im_size=im_size)
elif model == 'ConvNetAP':
net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling='avgpooling', im_size=im_size)
else:
net = None
exit('unknown model: %s'%model)
gpu_num = torch.cuda.device_count()
if gpu_num>0:
device = 'cuda'
if gpu_num>1:
net = nn.DataParallel(net)
else:
device = 'cpu'
net = net.to(device)
return net
# Path: DatasetCondensation/utils.py
def get_eval_pool(eval_mode, model, model_eval):
if eval_mode == 'M': # multiple architectures
model_eval_pool = ['MLP', 'ConvNet', 'LeNet', 'AlexNet', 'VGG11', 'ResNet18']
elif eval_mode == 'B': # multiple architectures with BatchNorm for DM experiments
model_eval_pool = ['ConvNetBN', 'ConvNetASwishBN', 'AlexNetBN', 'VGG11BN', 'ResNet18BN']
elif eval_mode == 'W': # ablation study on network width
model_eval_pool = ['ConvNetW32', 'ConvNetW64', 'ConvNetW128', 'ConvNetW256']
elif eval_mode == 'D': # ablation study on network depth
model_eval_pool = ['ConvNetD1', 'ConvNetD2', 'ConvNetD3', 'ConvNetD4']
elif eval_mode == 'A': # ablation study on network activation function
model_eval_pool = ['ConvNetAS', 'ConvNetAR', 'ConvNetAL', 'ConvNetASwish']
elif eval_mode == 'P': # ablation study on network pooling layer
model_eval_pool = ['ConvNetNP', 'ConvNetMP', 'ConvNetAP']
elif eval_mode == 'N': # ablation study on network normalization layer
model_eval_pool = ['ConvNetNN', 'ConvNetBN', 'ConvNetLN', 'ConvNetIN', 'ConvNetGN']
elif eval_mode == 'S': # itself
if 'BN' in model:
print('Attention: Here I will replace BN with IN in evaluation, as the synthetic set is too small to measure BN hyper-parameters.')
model_eval_pool = [model[:model.index('BN')]] if 'BN' in model else [model]
elif eval_mode == 'SS': # itself
model_eval_pool = [model]
else:
model_eval_pool = [model_eval]
return model_eval_pool
# Path: DatasetCondensation/utils.py
def evaluate_synset(it_eval, net, images_train, labels_train, testloader, args):
net = net.to(args.device)
images_train = images_train.to(args.device)
labels_train = labels_train.to(args.device)
lr = float(args.lr_net)
Epoch = int(args.epoch_eval_train)
lr_schedule = [Epoch//2+1]
optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)
criterion = nn.CrossEntropyLoss().to(args.device)
dst_train = TensorDataset(images_train, labels_train)
trainloader = torch.utils.data.DataLoader(dst_train, batch_size=args.batch_train, shuffle=True, num_workers=0)
start = time.time()
for ep in range(Epoch+1):
loss_train, acc_train = epoch('train', trainloader, net, optimizer, criterion, args, aug = True)
if ep in lr_schedule:
lr *= 0.1
optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)
time_train = time.time() - start
loss_test, acc_test = epoch('test', testloader, net, optimizer, criterion, args, aug = False)
print('%s Evaluate_%02d: epoch = %04d train time = %d s train loss = %.6f train acc = %.4f, test acc = %.4f' % (get_time(), it_eval, Epoch, int(time_train), loss_train, acc_train, acc_test))
return net, acc_train, acc_test
# Path: DatasetCondensation/utils.py
def get_daparam(dataset, model, model_eval, ipc):
# We find that augmentation doesn't always benefit the performance.
# So we do augmentation for some of the settings.
dc_aug_param = dict()
dc_aug_param['crop'] = 4
dc_aug_param['scale'] = 0.2
dc_aug_param['rotate'] = 45
dc_aug_param['noise'] = 0.001
dc_aug_param['strategy'] = 'none'
if dataset == 'MNIST':
dc_aug_param['strategy'] = 'crop_scale_rotate'
if model_eval in ['ConvNetBN']: # Data augmentation makes model training with Batch Norm layer easier.
dc_aug_param['strategy'] = 'crop_noise'
return dc_aug_param
# Path: DatasetCondensation/utils.py
def match_loss(gw_syn, gw_real, args):
dis = torch.tensor(0.0).to(args.device)
if args.dis_metric == 'ours':
for ig in range(len(gw_real)):
gwr = gw_real[ig]
gws = gw_syn[ig]
dis += distance_wb(gwr, gws)
elif args.dis_metric == 'mse':
gw_real_vec = []
gw_syn_vec = []
for ig in range(len(gw_real)):
gw_real_vec.append(gw_real[ig].reshape((-1)))
gw_syn_vec.append(gw_syn[ig].reshape((-1)))
gw_real_vec = torch.cat(gw_real_vec, dim=0)
gw_syn_vec = torch.cat(gw_syn_vec, dim=0)
dis = torch.sum((gw_syn_vec - gw_real_vec)**2)
elif args.dis_metric == 'cos':
gw_real_vec = []
gw_syn_vec = []
for ig in range(len(gw_real)):
gw_real_vec.append(gw_real[ig].reshape((-1)))
gw_syn_vec.append(gw_syn[ig].reshape((-1)))
gw_real_vec = torch.cat(gw_real_vec, dim=0)
gw_syn_vec = torch.cat(gw_syn_vec, dim=0)
dis = 1 - torch.sum(gw_real_vec * gw_syn_vec, dim=-1) / (torch.norm(gw_real_vec, dim=-1) * torch.norm(gw_syn_vec, dim=-1) + 0.000001)
else:
exit('unknown distance function: %s'%args.dis_metric)
return dis
# Path: DatasetCondensation/utils.py
def get_time():
return str(time.strftime("[%Y-%m-%d %H:%M:%S]", time.localtime()))
# Path: DatasetCondensation/utils.py
class TensorDataset(Dataset):
def __init__(self, images, labels): # images: n x c x h x w tensor
self.images = images.detach().float()
self.labels = labels.detach()
def __getitem__(self, index):
return self.images[index], self.labels[index]
def __len__(self):
return self.images.shape[0]
# Path: DatasetCondensation/utils.py
def epoch(mode, dataloader, net, optimizer, criterion, args, aug):
loss_avg, acc_avg, num_exp = 0, 0, 0
net = net.to(args.device)
criterion = criterion.to(args.device)
if mode == 'train':
net.train()
else:
net.eval()
for i_batch, datum in enumerate(dataloader):
img = datum[0].float().to(args.device)
if aug:
if args.dsa:
img = DiffAugment(img, args.dsa_strategy, param=args.dsa_param)
else:
img = augment(img, args.dc_aug_param, device=args.device)
lab = datum[1].long().to(args.device)
n_b = lab.shape[0]
output = net(img)
loss = criterion(output, lab)
acc = np.sum(np.equal(np.argmax(output.cpu().data.numpy(), axis=-1), lab.cpu().data.numpy()))
loss_avg += loss.item()*n_b
acc_avg += acc
num_exp += n_b
if mode == 'train':
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_avg /= num_exp
acc_avg /= num_exp
return loss_avg, acc_avg
# Path: DatasetCondensation/utils.py
def DiffAugment(x, strategy='', seed = -1, param = None):
if strategy == 'None' or strategy == 'none' or strategy == '':
return x
if seed == -1:
param.Siamese = False
else:
param.Siamese = True
param.latestseed = seed
if strategy:
if param.aug_mode == 'M': # original
for p in strategy.split('_'):
for f in AUGMENT_FNS[p]:
x = f(x, param)
elif param.aug_mode == 'S':
pbties = strategy.split('_')
set_seed_DiffAug(param)
p = pbties[torch.randint(0, len(pbties), size=(1,)).item()]
for f in AUGMENT_FNS[p]:
x = f(x, param)
else:
exit('unknown augmentation mode: %s'%param.aug_mode)
x = x.contiguous()
return x
# Path: DatasetCondensation/utils.py
class ParamDiffAug():
def __init__(self):
self.aug_mode = 'S' #'multiple or single'
self.prob_flip = 0.5
self.ratio_scale = 1.2
self.ratio_rotate = 15.0
self.ratio_crop_pad = 0.125
self.ratio_cutout = 0.5 # the size would be 0.5x0.5
self.brightness = 1.0
self.saturation = 2.0
self.contrast = 0.5
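# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Applying DiffAugment with the default ParamDiffAug settings; the strategy string
# is an assumption modelled on the usual DSA strategy used with this codebase.
if __name__ == "__main__":
    dsa_param = ParamDiffAug()
    images = torch.randn(16, 3, 32, 32)          # a batch of CIFAR-sized images
    augmented = DiffAugment(images, 'color_crop_cutout_flip_scale_rotate', seed=-1, param=dsa_param)
    print(augmented.shape)                       # same shape as the input batch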
# Path: drop_utils/drop.py
def drop_samples(images_all, labels_all, indices_class,
dataset: str, drop_criterion: str,
*, drop_ratio=None, keep_ratio=None):
"""images_all, labels_all, indices_class: the dataset structure that commonly used for DD
dataset: (str) dataset name
drop_criterion: (str) either `random` or a string of the form ${utility-indicator}_${order}, e.g. LossConverge_Small
drop_ratio, keep_ratio: only one of them should be specified (drop_ratio = 1.0 - keep_ratio)
"""
assert (drop_ratio is None) ^ (keep_ratio is None), \
f"Only one of drop_ratio ({drop_ratio}) and keep_ratio ({keep_ratio}) should be specified."
if drop_ratio is None:
assert keep_ratio is not None, "I know keep_ratio must have value here! I'm muting the warning in my way."
drop_ratio = 1.0 - keep_ratio
assert 0.0 <= drop_ratio <= 1.0, str(drop_ratio)
# Here's the tricky part: remember that, in any case, the samples we hope to drop are sorted to the left
# of the sequence, so we keep the `keep_ratio` fraction of samples at the right,
# i.e. we keep the range [drop_ratio, 100%]
dropped_idx_set = sample_indices_to_drop(dataset, drop_criterion, indices_class, drop_ratio, 1.0)
# re-indexing
images_all = [x for i, x in enumerate(images_all) if i not in dropped_idx_set]
print("Original:", labels_all.shape[0], "; Now:", len(images_all), "remain")
labels_all = [x for i, x in enumerate(labels_all) if i not in dropped_idx_set]
indices_class = [[] for c in range(len(indices_class))]
for i, lab in enumerate(labels_all):
indices_class[lab].append(i)
# for i, x in enumerate(indices_class):
# print("Class", i, "remains", len(x), "samples")
images_all = torch.stack(images_all, dim=0)
labels_all = torch.tensor(labels_all, dtype=torch.long, device=images_all.device)
torch.cuda.empty_cache()
return images_all, labels_all, indices_class
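# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# How drop_samples() might be called on the usual dataset-distillation structures;
# `build_dd_structures` is a hypothetical helper, and the dataset name, criterion
# string and keep_ratio are assumptions following the docstring above.
if __name__ == "__main__":
    images_all, labels_all, indices_class = build_dd_structures("CIFAR10")  # hypothetical loader
    images_all, labels_all, indices_class = drop_samples(
        images_all, labels_all, indices_class,
        dataset="CIFAR10",
        drop_criterion="LossConverge_Small",   # ${utility-indicator}_${order}
        keep_ratio=0.8,                        # equivalently drop_ratio=0.2
    )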
# Path: DatasetCondensation/main.py
import os
import time
import copy
import argparse
import numpy as np
import torch
import torch.nn as nn
import pdb
from torchvision.utils import save_image
from .utils import get_loops, get_dataset, get_network, get_eval_pool, evaluate_synset, get_daparam, match_loss, get_time, TensorDataset, epoch, DiffAugment, ParamDiffAug
from drop_utils import drop_samples
def main():
parser = argparse.ArgumentParser(description='Parameter Processing')
parser.add_argument('--method', type=str, default='DC', help='DC/DSA')
parser.add_argument('--dataset', type=str, default='CIFAR10', help='dataset')
parser.add_argument('--model', type=str, default='ConvNet', help='model')
parser.add_argument('--ipc', type=int, default=1, help='image(s) per class')
parser.add_argument('--eval_mode', type=str, default='S', help='eval_mode') # S: the same to training model, M: multi architectures, W: net width, D: net depth, A: activation function, P: pooling layer, N: normalization layer,
====REPOSITORY====
# Repo Name: flatypus/flowchat
# Path: flowchat/autodedent.py
def autodedent(*text_lines) -> str:
"""Format multiline strings, including with multiple levels of indentation, to align with the first line.
Example:
code = '''
def add(a, b):
return a + b
'''
autodedent(
"What does this code do?",
code,
"Suggest a comment that describes what this code does."
)
"""
text_lines = [i if isinstance(i, str) else str(i) for i in text_lines]
return dedent('\n'.join(text_lines)).strip("\n")
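# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# autodedent() stringifies each argument, joins them with newlines, dedents the
# result with textwrap.dedent and strips surrounding blank lines.
if __name__ == "__main__":
    body = """
        line one
        line two
    """
    print(autodedent(body))   # prints both lines with the common leading indentation removed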
# Path: flowchat/chain.py
class Chain:
def __init__(self, model: str, api_key: str = None, environ_key="OPENAI_API_KEY"):
super().__init__()
if type(model) is not str:
raise TypeError(
f"Model argument must be a string, not {type(model)}"
)
if api_key is not None and type(api_key) is not str:
raise TypeError(
f"API key argument must be a string, not {type(api_key)}"
)
if type(environ_key) is not str:
raise TypeError(
f"Environment key argument must be a string, not {type(environ_key)}"
)
if api_key is None:
api_key = os.environ.get(environ_key)
if not api_key:
raise ValueError(
"OpenAI API key not found. Please set the OPENAI_API_KEY environment variable, "
"pass in an api_key parameter, or set the environ_key parameter to the environment "
"variable that contains your API key."
)
openai.api_key = api_key
self.model = model
self.system = None
self.user_prompt = []
self.model_response = None
self.prompt_tokens = 0
self.completion_tokens = 0
def _query_api(self, function: callable, *args, max_query_time=None, **kwargs):
"""Call the API for max_query_time seconds, and if it times out, it will retry."""
timeouted_function = timeout(
dec_timeout=max_query_time, use_signals=False)(function)
return timeouted_function(*args, **kwargs)
def _try_query_and_parse(self, function: callable, json_schema, *args, max_query_time=None, **kwargs):
"""Query and try to parse the response, and if it fails, it will retry."""
completion = self._query_api(
function, *args, max_query_time=max_query_time, **kwargs)
if completion is None:
return None
if kwargs.get('stream', False):
return completion
message = completion.choices[0].message.content
if not json_schema is None:
open_bracket = message.find('{')
close_bracket = message.rfind('}')
message = message[open_bracket:close_bracket+1]
try:
message = json.loads(message)
except json.JSONDecodeError:
raise Exception(
"Response was not in the expected JSON format. Please try again. Check that you haven't accidentally lowered the max_tokens parameter so that the response is truncated."
)
self.prompt_tokens += completion.usage.prompt_tokens
self.completion_tokens += completion.usage.completion_tokens
return message
def _ask(
self,
system: Message,
user_messages: List[Message],
json_schema: Any = None,
max_query_time=None,
tries=-1,
**params
):
"""Ask a question to the chatbot with a system prompt and return the response."""
if not user_messages:
return None
messages = [
system,
*user_messages
] if system else user_messages
message = retry(delay=1, logger=logging, tries=tries)(self._try_query_and_parse)(
openai.chat.completions.create,
json_schema=json_schema,
messages=messages,
max_query_time=max_query_time,
**params
)
return message
def _format_images(self, image: str | ImageFormat | Any):
"""Format whatever image format we receive into the specific format that OpenAI's API expects."""
if isinstance(image, str):
return {"url": image}
elif not isinstance(image, dict):
# not string or dict so assume PIL image
# no specific file format, so default to PNG
return {"url": _encode_image(image, "PNG")}
else:
# we've received an object then; encode the image if necessary
if 'url' not in image:
raise Exception(
"Image object must have a url property."
)
if isinstance(image['url'], str):
url = image['url']
else:
file_format = image['format_type'] if 'format_type' in image else "PNG"
url = _encode_image(image['url'], file_format)
return {
"url": url,
**({"detail": image["detail"]} if "detail" in image else {})
}
def unhook(self):
"""Reset the chain's system and user prompt. The previous response is kept."""
self.system = None
self.user_prompt = []
return self
def anchor(self, system_prompt: str):
"""Set the chain's system prompt."""
if not isinstance(system_prompt, str):
raise TypeError(
f"System prompt must be a string, not {type(system_prompt)}"
)
self.system = {"role": "system", "content": system_prompt}
return self
def transform(self, function: Callable[[str], str]):
"""Transform the chain's model response with a function."""
if not callable(function):
raise TypeError(
f"Transform function must be callable, not {type(function)}"
)
self.model_response = function(self.model_response)
return self
def link(self, modifier: Union[Callable[[str], None], str], model: str = None, assistant=False, images: str | Any | List[str | Any] | ImageFormat = None):
"""Modify the chain's user prompt with a function, or just pass in a string to be added to the message list.
For example:
```
chain = (Chain()
.anchor("Hello!")
.link("How are you?")
.pull().unhook()
.link(lambda response: f"What emotions characterize this response? {response}")
.pull()
.log())
```
"""
if model is None:
model = self.model
if not callable(modifier) and not isinstance(modifier, str):
raise TypeError(
f"Modifier must be callable or string, not {type(modifier)}"
)
if isinstance(modifier, str) and modifier == "":
raise ValueError(
"Modifier cannot be an empty string."
)
prompt = modifier(self.model_response) if callable(
modifier) else modifier
role = "assistant" if assistant else "user"
if images is None:
self.user_prompt.append({"role": role, "content": prompt})
else:
# images accepts a string (url), a PIL image, as well as a specific typed dict, or a list of any of these
images = [images] if not isinstance(images, list) else images
images = [
{"type": "image_url", "image_url": self._format_images(image)}
for image in images
]
self.user_prompt.append(
{"role": role, "content": [
{"type": "text", "text": prompt},
*images
]}
)
return self
def pull(
self,
model: str = None,
frequency_penalty: float | int = None,
json_schema: Any = None,
logit_bias: Dict[str, float | int] = None,
max_query_time=None,
max_tokens: float | int = None,
n: float | int = None,
presence_penalty: float | int = None,
response_format: ResponseFormat = None,
seed: int = None,
stop: str | List[str] = None,
temperature: float | int = None,
top_p: float | int = None,
tries: int = -1
):
"""Make a request to the LLM and set the response."""
if model is None:
model = self.model
params = {
'frequency_penalty': frequency_penalty,
'logit_bias': logit_bias,
'max_query_time': max_query_time,
'max_tokens': max_tokens,
'model': model,
'n': n,
'presence_penalty': presence_penalty,
'response_format': response_format,
'seed': seed,
'stop': stop,
'temperature': temperature,
'top_p': top_p,
}
params = {k: v for k, v in params.items() if v is not None}
if json_schema is not None:
if not isinstance(json_schema, dict):
raise TypeError(
f"JSON schema must be a dictionary, not {type(json_schema)}"
)
params['response_format'] = {'type': 'json_object'}
params['model'] = 'gpt-4-1106-preview'
self.user_prompt[-1]['content'] += autodedent(
"You must respond in the following example JSON format. Remember to enclose the entire JSON object in curly braces:",
json.dumps(json_schema, indent=4)
)
response = self._ask(
self.system, self.user_prompt,
json_schema, tries=tries, **params
)
self.model_response = response
return self
def stream(
self,
plain_text_stream: bool = False,
model: str = None,
frequency_penalty: float | int = None,
logit_bias: Dict[str, float | int] = None,
max_query_time=None,
max_tokens: float | int = None,
n: float | int = None,
presence_penalty: float | int = None,
seed: int = None,
stop: str | List[str] = None,
temperature: float | int = None,
top_p: float | int = None,
):
"""Returns a generator that yields responses from the LLM."""
if model is None:
model = self.model
params = {
'frequency_penalty': frequency_penalty,
'logit_bias': logit_bias,
'max_query_time': max_query_time,
'max_tokens': max_tokens,
'model': model,
'n': n,
'presence_penalty': presence_penalty,
'seed': seed,
'stop': stop,
'temperature': temperature,
'top_p': top_p,
'stream': True
}
params = {k: v for k, v in params.items() if v is not None}
if not plain_text_stream:
return self._ask(
self.system, self.user_prompt,
None, **params
)
return (response.choices[0].delta.content
for response in self._ask(self.system, self.user_prompt, None, **params))
def last(self) -> str:
"""Return the chain's last model response."""
return self.model_response
def token_usage(self) -> tuple[int, int]:
    """Return the number of prompt and completion tokens used."""
    return self.prompt_tokens, self.completion_tokens
def log(self):
"""Log the chain's system prompt, user prompt, and model response."""
print('='*60)
print(f"System: {self.system}")
print(f"User: {self.user_prompt}")
print(f"Text: {self.model_response}")
print('='*60)
print("\n")
return self
def log_tokens(self):
"""Log the number of tokens used"""
prompt, completion = self.token_usage()
print(f"Prompt tokens: {prompt}")
print(f"Completion tokens: {completion}")
print(f"Total tokens: {prompt + completion}")
return self
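# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# A minimal chain in the style of the link() docstring above; the model name is an
# assumption and OPENAI_API_KEY is expected in the environment.
if __name__ == "__main__":
    summary = (
        Chain(model="gpt-3.5-turbo")
        .anchor("You are a terse assistant.")
        .link("Summarize the benefit of unit testing in one sentence.")
        .pull(max_tokens=60)
        .log()
        .last()
    )
    print(summary)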
# Path: examples/natural_language_cli.py
from flowchat import Chain, autodedent
import os
import subprocess
def execute_system_command(command):
try:
result = subprocess.run(
command, shell=True, check=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
)
return result.stdout
except subprocess.CalledProcessError as e:
return e.stderr
def main():
print("Welcome to the Natural Language Command Line Interface!")
os_system_context = f"You are a shell interpreter assistant running on {os.name} operating system."
while True:
user_input = input("Please enter your command in natural language: ")
# ========================================================================== #
should_exit = (
Chain(model="gpt-3.5-turbo")
.link(autodedent(
"Does the user want to exit the CLI? Respond with 'YES' or 'NO'.",
user_input
)).pull(max_tokens=2).unhook().last()
)
if should_exit.lower() in ("yes", "y"):
print("Exiting the CLI.")
break
# ========================================================================== #
print("Checking if the command is possible to execute...")
# Check if the user's request is possible; example of nested chains!
# In practice, you could just ignore all of this and just execute the command.
possible = (
Chain(model="gpt-4-1106-preview")
.anchor(os_system_context)
.link(autodedent(
"The user would like to do this: ",
user_input
))
.link("Create a short list of the minimum requirements that need to be checked in order to determine if this action is possible on this device.")
.pull(json_schema={"requirement_list": "List[str]"})
.transform(lambda requirement_json: requirement_json["requirement_list"]).log()
.transform(lambda requirement_list: [
Chain("gpt-4-1106-preview")
.anchor(os_system_context)
.link(autodedent(
"Suggest a command that can check if this requirement is met. The command should be a one-liner without user input or interaction.",
requirement,
"If the command needs additional information, you can include it. If the command itself can be run alone, leave additional_info an empty list."
))
.pull(json_schema={"command": "string", "additional_info": "List[str]"})
.transform(lambda command_json: (command_json["command"], [
Chain("gpt-4-1106-preview")
.anchor(os_system_context)
.link(autodedent(
"The user would like to know this information: ",
info,
"Suggest a command that can check if this information is available."
))
.pull(json_schema={"command": "string"})
.transform(lambda command_json: command_json["command"])
.transform(lambda command: f"{info} | Output:{execute_system_command(command)}")
.unhook().last()
for info in command_json.get("additional_info")]
)).unhook()
.anchor(os_system_context)
.link(lambda command_info: autodedent(
"Include the additional information in the command:",
command_info[0],
*command_info[1],
| "to create a final command that can check if this requirement is met:", |
====REPOSITORY====
# Repo Name: WHU-USI3DV/PatchAugNet
# Path: libs/pointops/functions/pointops.py
class FurthestSampling(Function):
class Gathering(Function):
class NearestNeighbor(Function):
class Interpolation(Function):
class Grouping(Function):
class GroupingInt(Function):
class BallQuery(Function):
class FeatureDistribute(Function):
class FeatureGather(Function):
class LabelStatBallRange(Function):
class LabelStatIdx(Function):
class LabelStatAndBallQuery(Function):
class KNNQueryNaive(Function):
class KNNQuery(Function):
class KNNQueryExclude(Function):
class QueryAndGroup(nn.Module):
class QueryAndGroup_Edge(nn.Module):
class QueryAndGroup_Edge_Split(nn.Module):
class GroupAll(nn.Module):
def forward(ctx, xyz, m):
def backward(xyz, a=None):
def forward(ctx, features, idx):
def backward(ctx, grad_out):
def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
def backward(ctx, a=None, b=None):
def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
def backward(ctx, a=None):
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
def backward(ctx, a=None):
def forward(ctx, max_xyz: torch.Tensor, xyz: torch.Tensor) -> torch.Tensor:
def backward(ctx, a=None):
def forward(ctx, max_feature: torch.Tensor, distribute_idx: torch.Tensor) -> torch.Tensor:
def backward(ctx, grad_distribute_feature: torch.Tensor):
def forward(ctx, radius: float, xyz: torch.Tensor, new_xyz: torch.Tensor, label_stat: torch.Tensor) -> torch.Tensor:
def backward(ctx, a=None):
def forward(ctx, nsample: int, label_stat: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
def backward(ctx, a=None):
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor, label_stat: torch.Tensor):
def backward(ctx, a=None, b=None):
def pairwise_distances(x, y=None):
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
def backward(ctx):
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
def backward(ctx, a=None):
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
def backward(ctx):
def __init__(self, radius=None, nsample=32, use_xyz=True):
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
def __init__(self, radius=None, nsample=32, knn_dilation=1, use_xyz=True, ret_gxyz=False, ret_sample_idx=False):
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, center_features: torch.Tensor = None, idx: torch.Tensor = None) -> \
def __init__(self, radius=None, nsample=32, use_xyz=True, ret_gxyz=False):
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, center_features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
def __init__(self, use_xyz: bool = True):
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]:
# Path: utils/model_util/pt_util.py
class SharedMLP(nn.Sequential):
class SharedMLP_1d(nn.Sequential):
class _BNBase(nn.Sequential):
class BatchNorm1d(_BNBase):
class BatchNorm2d(_BNBase):
class BatchNorm3d(_BNBase):
class _ConvBase(nn.Sequential):
class Conv1d(_ConvBase):
class Conv2d(_ConvBase):
class Conv3d(_ConvBase):
class FC(nn.Sequential):
class _DropoutNoScaling(InplaceFunction):
class _FeatureDropoutNoScaling(_DropoutNoScaling):
class TrainValSplitter():
class BNMomentumScheduler(object):
class Trainer(object):
def __init__(
self,
args: List[int],
*,
bn: bool = False,
activation=nn.ReLU(inplace=True),
preact: bool = False,
first: bool = False,
name: str = ""
):
def __init__(
self,
args: List[int],
*,
bn: bool = False,
activation=nn.ReLU(inplace=True),
preact: bool = False,
first: bool = False,
name: str = ""
):
def __init__(self, in_size, batch_norm=None, name=""):
def __init__(self, in_size: int, *, name: str = ""):
def __init__(self, in_size: int, name: str = ""):
def __init__(self, in_size: int, name: str = ""):
def __init__(
self,
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=None,
batch_norm=None,
bias=True,
preact=False,
name=""
):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: int = 1,
stride: int = 1,
padding: int = 0,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: Tuple[int, int] = (1, 1),
stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0),
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: Tuple[int, int, int] = (1, 1, 1),
stride: Tuple[int, int, int] = (1, 1, 1),
padding: Tuple[int, int, int] = (0, 0, 0),
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
def __init__(
self,
in_size: int,
out_size: int,
*,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=None,
preact: bool = False,
name: str = ""
):
def _make_noise(input):
def symbolic(g, input, p=0.5, train=False, inplace=False):
def forward(cls, ctx, input, p=0.5, train=False, inplace=False):
def backward(ctx, grad_output):
def symbolic(input, p=0.5, train=False, inplace=False):
def _make_noise(input):
def group_model_params(model: nn.Module, **kwargs):
def checkpoint_state(
model=None, optimizer=None, best_prec=None, epoch=None, it=None
):
def save_checkpoint(
state, is_best, filename='checkpoint', bestname='model_best'
):
def load_checkpoint(model=None, optimizer=None, filename='checkpoint'):
def variable_size_collate(pad_val=0, use_shared_memory=True):
def wrapped(batch):
def __init__(
self, *, numel: int, percent_train: float, shuffled: bool = False
):
def set_bn_momentum_default(bn_momentum):
def fn(m):
def __init__(
self, model, bn_lambda, last_epoch=-1,
setter=set_bn_momentum_default
):
def step(self, epoch=None):
def __init__(
self,
model,
model_fn,
optimizer,
checkpoint_name="ckpt",
best_name="best",
lr_scheduler=None,
bnm_scheduler=None,
eval_frequency=-1,
viz=None
):
def _decode_value(v):
def _train_it(self, it, batch):
def eval_epoch(self, d_loader):
def train(
self,
start_it,
start_epoch,
n_epochs,
train_loader,
test_loader=None,
best_loss=0.0
):
# Path: place_recognition/pptnet_origin/models/pptnet.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import functools
import numpy as np
import time
import os
import sys
import loupe as lp
from torch.autograd import Variable
from typing import List
from libs.pointops.functions import pointops
from utils.model_util import pt_util
feature_size=param["FEATURE_SIZE"], # 256,256,256,256
max_samples=param["MAX_SAMPLES"], # 64,256,1024,4096
cluster_size=param["CLUSTER_SIZE"], # 1,4,16,64
output_dim=param["OUTPUT_DIM"], # 256,256,256,256
gating=param['GATING'], # True
add_batch_norm=True
)
else:
print("No aggregation algorithm: ", aggregation)
self.use_normalize = use_normalize
def forward(self, x, return_feat=True):
r"""
x: B x 1 x N x 3
"""
x = x.squeeze(1)
res = self.backbone(x)
center_idx = res['center_idx_origin']
f0, f1, f2, f3 = res['fp_features'][0], res['fp_features'][1], res['fp_features'][2], res['fp_features'][3]
x = self.aggregation(f0, f1, f2, f3) # B x C0x64x1, BxC1x256, BxC2x1024, BxC3x4096 -> Bx256
if self.use_normalize:
x = F.normalize(x)
if return_feat:
return x, res['fp_features'], center_idx
else:
return x
class PointNet2(nn.Module):
def __init__(self, param=None):
super().__init__()
c = 3
k = 13
use_xyz = True
self.SA_modules = nn.ModuleList()
sap = param['SAMPLING']
knn = param['KNN']
fs = param['FEATURE_SIZE']
gp = param['GROUP']
self.SA_modules.append(
PointNet2SAModule(npoint=sap[0], nsample=knn[0], gp=gp, mlp=[c, 32, 32, 64], use_xyz=use_xyz))
self.SA_modules.append(
PointNet2SAModule(npoint=sap[1], nsample=knn[1], gp=gp, mlp=[64, 64, 64, 128], use_xyz=use_xyz))
self.SA_modules.append(
PointNet2SAModule(npoint=sap[2], nsample=knn[2], gp=gp, mlp=[128, 128, 128, 256], use_xyz=use_xyz))
self.SA_modules.append(
PointNet2SAModule(npoint=sap[3], nsample=knn[3], gp=gp, mlp=[256, 256, 256, 512], use_xyz=use_xyz))
self.FP_modules = nn.ModuleList()
self.FP_modules.append(PointNet2FPModule(mlp=[fs[1] + c, 256, 256, fs[0]]))
self.FP_modules.append(PointNet2FPModule(mlp=[fs[2] + 64, 256, fs[1]]))
self.FP_modules.append(PointNet2FPModule(mlp=[fs[3] + 128, 256, fs[2]]))
self.FP_modules.append(PointNet2FPModule(mlp=[512 + 256, 256, fs[3]]))
def forward(self, pointcloud: torch.cuda.FloatTensor):
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3) tensor
Point cloud to run predictions on
Each point in the point-cloud MUST
be formatted as (x, y, z, features...)
"""
l_xyz, l_features = [pointcloud], [pointcloud.transpose(1, 2).contiguous()]
l_center_idx = []
l_sample_idx = []
for i in range(len(self.SA_modules)):
li_xyz, li_center_idx, li_sample_idx, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
l_center_idx.append(li_center_idx)
l_sample_idx.append(li_sample_idx)
# get center idx and sample idx in origin cloud
l_center_idx_origin = [l_center_idx[0]]
l_sample_idx_origin = [l_sample_idx[0]]
for i in range(1, len(l_center_idx)):
li_center_idx_origin = torch.gather(l_center_idx_origin[i - 1], -1, l_center_idx[i].long())
temp_l_center_idx_origin = l_center_idx_origin[i - 1].unsqueeze(1)
temp_l_center_idx_origin = temp_l_center_idx_origin.repeat(1, l_sample_idx[i].shape[1], 1)
li_sample_idx_origin = torch.gather(temp_l_center_idx_origin, -1, l_sample_idx[i].long())
l_center_idx_origin.append(li_center_idx_origin)
l_sample_idx_origin.append(li_sample_idx_origin)
for i in range(-1, -(len(self.FP_modules) + 1), -1):
l_features[i - 1] = self.FP_modules[i](l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i])
# l3: B x C x 64
# l2: B x C x 256
# l1: B x C x 1024
# l0: B x C x 4096
return {
'center_idx_origin': l_center_idx_origin,
'sample_idx_origin': l_sample_idx_origin,
'fp_features': [l_features[3].unsqueeze(-1), l_features[2].unsqueeze(-1),
l_features[1].unsqueeze(-1), l_features[0].unsqueeze(-1)]
}
class _PointNet2SAModuleBase(nn.Module):
def __init__(self):
super().__init__()
self.npoint = None
self.groupers = None
self.mlps = None
self.sas = None
def forward(self, xyz: torch.Tensor, features: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
(B, N, C) tensor of the descriptors of the the features
Returns
-------
new_xyz : torch.Tensor
====REPOSITORY====
# Repo Name: gchada/ROAM
# Path: sim/rail_walker_interface/robot/robot.py
class BaseWalker(Generic[_ObsT]):
def __init__(
self,
name: Optional[str] = "robot",
Kp: float = 5,
Kd: float = 1,
force_real_control_timestep : bool = False,
limit_action_range : float = 1.0,
power_protect_factor : float = 0.1
):
assert limit_action_range > 0 and limit_action_range <= 1.0
self.name = name
self.Kp = Kp
self.Kd = Kd
self.force_real_control_timestep = force_real_control_timestep
self._last_control_t = 0.0
self.limit_action_range = limit_action_range
self._power_protect_factor = power_protect_factor
@property
def is_real_robot(self) -> bool:
return False
@property
def power_protect_factor(self) -> float:
return self._power_protect_factor
@power_protect_factor.setter
def power_protect_factor(self, value: float) -> None:
assert value >= 0 and value <= 1.0
self._power_protect_factor = value
"""
The control_timestep is the time interval between two consecutive model control actions.
"""
@property
def control_timestep(self) -> float:
pass
@property
def action_interpolation(self) -> bool:
pass
"""
The control_subtimestep is the time interval between two consecutive internal control actions. It will also be the physics timestep if in simulation.
"""
@property
def control_subtimestep(self) -> float:
pass
def receive_observation(self) -> bool:
pass
@property
def joint_qpos_init(self) -> np.ndarray:
pass
@property
def joint_qpos_sitting(self) -> np.ndarray:
pass
@cached_property
def joint_qpos_crouch(self) -> np.ndarray:
return (self.joint_qpos_init + self.joint_qpos_sitting) / 2.0
"""
This property will be used to determine the standing range of qpos of the robot.
"""
@property
def joint_qpos_offset(self) -> np.ndarray:
pass
@property
def joint_qpos_mins(self) -> np.ndarray:
pass
@property
def joint_qpos_maxs(self) -> np.ndarray:
pass
def reset(self) -> None:
pass
def get_3d_linear_velocity(self) -> np.ndarray:
pass
def get_3d_local_velocity(self) -> np.ndarray:
pass
def get_3d_angular_velocity(self) -> np.ndarray:
pass
def get_framequat_wijk(self) -> np.ndarray:
pass
def get_roll_pitch_yaw(self) -> np.ndarray:
pass
def get_last_observation(self) -> Optional[_ObsT]:
pass
def get_3d_acceleration_local(self) -> np.ndarray:
pass
def get_joint_qpos(self) -> np.ndarray:
pass
def get_joint_qvel(self) -> np.ndarray:
pass
def get_joint_qacc(self) -> np.ndarray:
pass
def get_joint_torques(self) -> np.ndarray:
pass
def _apply_action(self, action: np.ndarray) -> bool:
pass
def close(self) -> None:
pass
def __del__(self):
self.close()
@property
def action_qpos_mins(self) -> np.ndarray:
return (self.joint_qpos_mins - self.joint_qpos_init) * self.limit_action_range + self.joint_qpos_init
@property
def action_qpos_maxs(self) -> np.ndarray:
return (self.joint_qpos_maxs - self.joint_qpos_init) * self.limit_action_range + self.joint_qpos_init
def apply_action(self, action: np.ndarray) -> bool:
action = np.clip(action, self.action_qpos_mins, self.action_qpos_maxs)
if not self.force_real_control_timestep:
return self._apply_action(action)
else:
t = time.time()
dt = t - self._last_control_t
if dt >= self.control_timestep:
self._last_control_t = t
return self._apply_action(action)
else:
time_to_sleep = self.control_timestep - dt
time.sleep(time_to_sleep)
self._last_control_t = t + time_to_sleep
return self._apply_action(action)
def can_apply_action(self) -> bool:
t = time.time()
dt = t - self._last_control_t
if (not self.force_real_control_timestep) or dt >= self.control_timestep:
return True
else:
return False
def async_apply_action(self, action: np.ndarray) -> bool:
if self.can_apply_action():
self._last_control_t = time.time()
return self._apply_action(action)
else:
return False
@cached_property
def joint_nums(self) -> int:
return len(self.joint_qpos_init)
@cached_property
def action_spec(self) -> gym.spaces.Box:
return gym.spaces.Box(
low=self.joint_qpos_mins,
high=self.joint_qpos_maxs,
shape=(self.joint_nums,),
dtype=np.float32
)
def unwrapped(self):
return self
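# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# A stub subclass showing how apply_action() clamps commands to the scaled action
# range and, with force_real_control_timestep=True, spaces real control calls by
# at least control_timestep. All joint limits and rates here are assumptions.
class _StubWalker(BaseWalker[np.ndarray]):
    @property
    def control_timestep(self) -> float:
        return 0.05                               # assume a 20 Hz control loop

    @property
    def joint_qpos_init(self) -> np.ndarray:
        return np.zeros(3)

    @property
    def joint_qpos_mins(self) -> np.ndarray:
        return -np.ones(3)

    @property
    def joint_qpos_maxs(self) -> np.ndarray:
        return np.ones(3)

    def _apply_action(self, action: np.ndarray) -> bool:
        print("commanded qpos:", action)
        return True


if __name__ == "__main__":
    walker = _StubWalker(force_real_control_timestep=True, limit_action_range=0.5)
    walker.apply_action(np.array([2.0, -2.0, 0.0]))   # clamped to [-0.5, 0.5] before dispatch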
# Path: sim/rail_walker_interface/robot/robot.py
class BaseWalkerWithFootContact:
def get_foot_contact(self) -> np.ndarray:
pass
def get_foot_force(self) -> np.ndarray:
pass
# Path: sim/rail_walker_interface/joystick_policy/joystick_interfaces.py
class JoystickPolicyResetter(Generic[_RobotClsResetterT]):
def perform_reset(
self,
Robot: _RobotClsResetterT,
info_dict: dict[str,Any],
termination_provider_triggered: JoystickPolicyTerminationConditionProvider,
randomState: np.random.RandomState
) -> None:
pass
def step_resetter(
self,
Robot: _RobotClsTerminationT,
target_goal_world_delta: np.ndarray,
target_goal_local: np.ndarray,
target_yaw : float,
target_delta_yaw: float,
target_velocity: float,
velocity_to_goal: float,
change_in_abs_target_delta_yaw : float,
target_custom_data: Optional[Any],
enable_target_custom_obs : bool,
info_dict: dict[str,Any],
randomState: np.random.RandomState
) -> None:
pass
# Path: sim/rail_walker_interface/joystick_policy/joystick_interfaces.py
class JoystickPolicyRewardProvider(Generic[_RobotClsRewardProviderT]):
def get_reward(self) -> float:
raise NotImplementedError()
def step_reward(
self,
Robot: _RobotClsRewardProviderT,
action_target_qpos: np.ndarray,
target_goal_world_delta: np.ndarray,
target_goal_local: np.ndarray,
target_yaw : float,
target_delta_yaw: float,
target_velocity: float,
velocity_to_goal: float,
change_in_abs_target_delta_yaw : float,
target_custom_data: Optional[Any],
enable_target_custom_obs : bool,
info_dict: dict[str,Any],
randomState: np.random.RandomState
) -> None:
pass
def reset_reward(
self,
Robot: _RobotClsRewardProviderT,
info_dict: dict[str,Any],
termination_provider_triggered: JoystickPolicyTerminationConditionProvider,
randomState: np.random.RandomState
) -> None:
pass
# Path: sim/rail_walker_interface/joystick_policy/joystick_interfaces.py
class JoystickPolicyTargetProvider(Generic[_RobotClsTargetProviderT]):
def get_target_goal_world_delta(self, Robot: _RobotClsTargetProviderT) -> np.ndarray:
raise NotImplementedError()
def get_target_velocity(self, Robot: _RobotClsTargetProviderT) -> float:
return 0.5
def is_target_velocity_fixed(self) -> bool:
return True
def get_target_custom_data(self) -> Optional[Any]:
return None
def get_target_custom_data_observable_spec(self) -> Optional[gym.Space]:
return None
def get_target_custom_data_observable(self) -> Optional[Any]:
return None
def has_target_changed(self) -> bool:
return False
def step_target(
self,
Robot: _RobotClsTargetProviderT,
info_dict: dict[str,Any],
randomState : np.random.RandomState
) -> None:
pass
def after_step_target(
self,
Robot: _RobotClsTargetProviderT,
info_dict: dict[str,Any],
randomState : np.random.RandomState
) -> None:
pass
def reset_target(
self,
Robot: _RobotClsTargetProviderT,
info_dict: dict[str,Any],
termination_provider_triggered: JoystickPolicyTerminationConditionProvider,
randomState: np.random.RandomState
) -> None:
pass
# Path: sim/rail_walker_interface/joystick_policy/joystick_interfaces.py
class JoystickPolicyTerminationConditionProvider(Generic[_RobotClsTerminationT]):
def should_terminate(self) -> bool:
raise NotImplementedError()
def step_termination_condition(
self,
Robot: _RobotClsTerminationT,
target_goal_world_delta: np.ndarray,
target_goal_local: np.ndarray,
target_yaw : float,
target_delta_yaw: float,
target_velocity: float,
velocity_to_goal: float,
change_in_abs_target_delta_yaw : float,
target_custom_data: Optional[Any],
enable_target_custom_obs : bool,
info_dict: dict[str,Any],
randomState: np.random.RandomState
) -> None:
pass
def reset_termination_condition(
self,
Robot: _RobotClsTerminationT,
info_dict: dict[str,Any],
termination_provider_triggered,
randomState: np.random.RandomState
) -> None:
pass
# Path: sim/rail_walker_interface/joystick_policy/joystick_interfaces.py
class JoystickPolicyTargetObservable(Generic[_RobotClsTargetObsT]):
def get_observation_spec(self) -> gym.Space:
raise NotImplementedError()
def step_target_obs(
self,
Robot: _RobotClsTargetObsT,
target_goal_world_delta: np.ndarray,
target_goal_local: np.ndarray,
target_yaw : float,
target_delta_yaw: float,
target_velocity: float,
velocity_to_goal: float,
change_in_abs_target_delta_yaw : float,
target_custom_data: Optional[Any],
enable_target_custom_obs : bool,
info_dict: dict[str,Any],
randomState: np.random.RandomState
) -> None:
pass
def reset_target_obs(
self,
Robot: _RobotClsTargetObsT,
target_goal_world_delta: np.ndarray,
target_goal_local: np.ndarray,
target_yaw : float,
target_delta_yaw: float,
target_velocity: float,
info_dict: dict[str,Any],
target_custom_data: Optional[Any],
enable_target_custom_obs : bool,
termination_provider_triggered: JoystickPolicyTerminationConditionProvider,
randomState: np.random.RandomState
) -> None:
pass
def get_observation(self) -> Any:
raise NotImplementedError()
# Path: sim/rail_walker_interface/joystick_policy/joystick_policy.py
from ..robot import BaseWalker, BaseWalkerWithFootContact
from .joystick_interfaces import JoystickPolicyResetter, JoystickPolicyRewardProvider, JoystickPolicyTargetProvider, \
JoystickPolicyTerminationConditionProvider, JoystickPolicyTargetObservable
from typing import Optional, Any
import numpy as np
import transforms3d as tr3d
self._info_dict["velocity_norm"] = robot_v_norm
self._info_dict["velocity_to_goal"] = robot_v_to_goal
self._info_dict["velocity_local_x"] = robot_v_local[0]
self._info_dict["velocity_local_y"] = robot_v_local[1]
self._info_dict["velocity_local_z"] = robot_v_local[2]
self._info_dict["roll"] = robot_rpy[0]
self._info_dict["pitch"] = robot_rpy[1]
self._info_dict["yaw"] = robot_rpy[2]
self._info_dict["joint_torques"] = np.mean(np.abs(self.robot.get_joint_torques()))
self._info_dict["joint_qvels"] = np.mean(np.abs(self.robot.get_joint_qvel()))
self._info_dict["joint_qaccs"] = np.mean(np.abs(self.robot.get_joint_qacc()))
self._info_dict["joint_velocities"] = np.mean(np.abs(self.robot.get_joint_qvel()))
if hasattr(self.robot, "get_foot_force"):
foot_force: np.ndarray = self.robot.get_foot_force()
if foot_force.shape == (4,):
foot_force_names = ["FR", "FL", "RR", "RL"]
else:
foot_force_names = list(range(foot_force.shape[0]))
for i in range(len(foot_force_names)):
self._info_dict["foot_force_" + foot_force_names[i]] = foot_force[i]
self.reward_provider.step_reward(
self.robot,
self._step_target_qpos,
self.target_goal_world_delta,
self.target_goal_local,
self.target_yaw,
self.target_delta_yaw,
self._target_velocity,
robot_v_to_goal,
change_in_abs_target_delta_yaw,
self._target_custom_data,
self.enable_target_custom_obs,
self._info_dict,
random_state
)
reward_perstep = self.reward_provider.get_reward()
# assert reward_perstep is not None and reward_perstep != np.nan
self._info_dict["reward_perstep"] = reward_perstep
self._rew_step = reward_perstep
self._rew_step_final = self.reward_provider.get_reward_final()
# Step the target yaw observable
if self.target_observable is not None:
self.target_observable.step_target_obs(
self.robot,
self.target_goal_world_delta,
self.target_goal_local,
self.target_yaw,
self.target_delta_yaw,
self._target_velocity,
robot_v_to_goal,
change_in_abs_target_delta_yaw,
self._target_custom_data,
self.enable_target_custom_obs,
self._info_dict,
random_state
)
# Step resetters
for resetter in self.resetters:
resetter.step_resetter(
self.robot,
self.target_goal_world_delta,
self.target_goal_local,
self.target_yaw,
self.target_delta_yaw,
self._target_velocity,
robot_v_to_goal,
change_in_abs_target_delta_yaw,
self._target_custom_data,
self.enable_target_custom_obs,
self._info_dict,
random_state
)
# Step termination providers
for termination_provider in self.termination_providers:
termination_provider.step_termination_condition(
self.robot,
self.target_goal_world_delta,
self.target_goal_local,
self.target_yaw,
self.target_delta_yaw,
self._target_velocity,
robot_v_to_goal,
change_in_abs_target_delta_yaw,
self._target_custom_data,
self.enable_target_custom_obs,
self._info_dict,
random_state
)
if termination_provider.should_terminate():
print("Termination provider", termination_provider, "terminated the episode")
self._termination_reason = termination_provider
break
# Step truncaiton providers
for truncation_provider in self.truncation_providers:
truncation_provider.step_termination_condition(
self.robot,
self.target_goal_world_delta,
self.target_goal_local,
self.target_yaw,
self.target_delta_yaw,
self._target_velocity,
robot_v_to_goal,
change_in_abs_target_delta_yaw,
self._target_custom_data,
self.enable_target_custom_obs,
self._info_dict,
random_state
)
if truncation_provider.should_terminate():
print("Truncation provider", truncation_provider, "truncated the episode")
self._truncation_reason = truncation_provider
break
return self._info_dict.copy()
def after_after_step(
====REPOSITORY====
# Repo Name: NUCCASJNR/PaystackPyAPI
# Path: paystackpyAPI/transaction.py
class Transaction(PaystackAPI):
INITIALIZATION_OPTIONAL_PARAMS = [
"currency",
"reference",
"callback_url",
"plan",
"invoice_limit",
"metadata",
"channels",
"split_code",
"subaccount",
"transaction_charge",
"bearer"
]
TRANSACTION_LIST_OPTIONAL_PARAMS = [
"customer",
"terminalid",
"status",
"from",
"to",
"amount"
]
CHARGE_AUTHORIZATION_OPTIONAL_PARAMS = [
"reference",
"currency",
"metadata",
"channels",
"subaccount",
"transaction_charge",
"bearer",
"queue"
]
EXPORT_OPTIONAL_PARAMS = [
'from',
'to',
'customer',
'status',
'currency',
'amount',
'settled',
'settlement',
'payment_page'
]
def __init__(self, api_key: str):
super().__init__(api_key)
self.paystack_initialization_url = "https://api.paystack.co/transaction/initialize"
self.paystack_verification_url = "https://api.paystack.co/transaction/verify"
self.list_transaction_url = "https://api.paystack.co/transaction"
self.fetch_transaction_url = "https://api.paystack.co/transaction"
self.charge_authorization_url = "https://api.paystack.co/transaction/charge_authorization"
self.transaction_timeline_url = "https://api.paystack.co/transaction/timeline"
self.transaction_totals_url = "https://api.paystack.co/transaction/totals"
self.export_transactions_url = "https://api.paystack.co/transaction/export"
def initialize_transaction(self, email: str, amount: int, **kwargs):
"""
Initialize a Paystack transaction.
:param email: Customer's email address.
:param amount: Transaction amount.
:param kwargs: Optional parameters for the transaction.
Example: `currency`, `callback_url`, etc.
:return: JSON response from Paystack API.
:raises APIError: If required parameters are missing or the API key is invalid.
"""
if not email or not amount:
raise APIError(400, "Missing required parameters: email and/or amount")
valid_kwargs = {key: value for key, value in kwargs.items() if key in self.INITIALIZATION_OPTIONAL_PARAMS}
data = {
"email": email,
"amount": amount * 100,
**valid_kwargs
}
if not self.api_key:
raise APIError(401, "Invalid API key")
headers = {
'Authorization': f'Bearer {self.api_key}',
'Content-Type': 'application/json',
}
response = requests.post(self.paystack_initialization_url, headers=headers, json=data)
if response.status_code == 200:
custom_response = {
"status_code": response.status_code,
"message": "Transaction initialized successfully",
"response_from_api": response.json()
}
else:
error_message = response.text
raise APIError(response.status_code, error_message)
return custom_response
def verify_transaction(self, reference: Union[int, str]) -> Dict:
"""
Verify a Paystack transaction.
:param reference: Reference id of the transaction (int or str).
:return: Customized response from Paystack API.
:raises APIError: If the reference is missing or the API key is invalid.
"""
if not reference:
raise APIError(400, "Missing required parameter: reference")
if not self.api_key:
raise APIError(401, "Invalid API key")
url = f"{self.paystack_verification_url}/{reference}"
headers = {
'Authorization': f'Bearer {self.api_key}'
}
response = requests.get(url, headers=headers)
if response.status_code == 200:
custom_response = {
"status_code": response.status_code,
"message": "Transaction details retrieved successfully",
"response_from_api": response.json()
}
else:
error_message = response.text
raise APIError(response.status_code, error_message)
return custom_response
def list_transactions(self, **kwargs: Dict) -> Dict:
"""
Retrieve a list of transactions based on optional parameters.
:param kwargs: Optional parameters for filtering the list of transactions.
Supported parameters:
- `perPage`: Number of transactions to retrieve per page.
- `page`: Page number for pagination.
- `from`: Start date for transactions in the format 'YYYY-MM-DD'.
- `to`: End date for transactions in the format 'YYYY-MM-DD'.
- `customer`: Customer's email or identification.
- `status`: Transaction status (e.g., 'success', 'failed').
- `currency`: Currency code (e.g., 'NGN', 'USD').
- `amount`: Transaction amount.
- `reference`: Transaction reference.
- `gateway`: Payment gateway used (e.g., 'card', 'bank').
- `channel`: Transaction channel (e.g., 'card', 'bank').
- `plan`: Plan code associated with the transaction.
:return: Customized response with the list of transactions.
Format: {
"status_code": int,
"message": str,
"data": dict
}
:raises APIError: If the API key is invalid or if there's an issue with the request.
"""
if not self.api_key:
raise APIError(401, "Invalid API Key")
valid_kwargs = {key: value for key, value in kwargs.items() if key in self.TRANSACTION_LIST_OPTIONAL_PARAMS}
headers = {
'Authorization': f'Bearer {self.api_key}'
}
data = {
**valid_kwargs
}
response = requests.get(self.list_transaction_url, headers=headers, params=data)
if response.status_code == 200:
custom_response = {
"status_code": response.status_code,
"message": "Transactions details below",
"response_from_api": response.json()
}
else:
error_message = response.text
raise APIError(response.status_code, error_message)
return custom_response
def fetch_transaction(self, id: int) -> Dict:
"""
Fetches the details of a transaction using the id provided
:param id:
Transaction Id
"""
if not self.api_key:
raise APIError(401, "Invalid Api Key")
url = f"{self.fetch_transaction_url}/{id}"
headers = {
'Authorization': f'Bearer {self.api_key}'
}
response = requests.get(url, headers=headers)
if response.status_code == 200:
custom_response = {
"status_code": response.status_code,
"message": "Transaction Successfully fetched",
"response_from_api": response.json()
}
else:
error_message = response.text
raise APIError(response.status_code, error_message)
return custom_response
def charge_authorization(self, email: str, amount: int, authorization_code: str, **kwargs: Dict) -> Dict:
"""charge a transaction"""
if not self.api_key:
raise APIError(401, "Invalid API Key")
valid_kwargs = {key: value for key, value in kwargs.items() if key in self.CHARGE_AUTHORIZATION_OPTIONAL_PARAMS}
if not amount:
raise APIError(400, "Missing required parameter amount")
if not email:
raise APIError(400, "Missing required parameter email")
if not authorization_code:
raise APIError(400, "Missing required parameter authorization_code")
headers = {
'Authorization': f'Bearer {self.api_key}',
'Content-Type': 'application/json',
}
data = {
"amount": amount * 100,
"email": email,
"authorization_code": f"AUTH_{authorization_code}",
**valid_kwargs
}
response = requests.post(self.charge_authorization_url, headers=headers, json=data)
if response.status_code == 200:
custom_response = {
"status_code": response.status_code,
"message": "Transaction initialized successfully",
"response_from_api": response.json()
}
else:
error_message = response.text
raise APIError(response.status_code, error_message)
return custom_response
def show_transaction_timeline(self, id_or_reference: str) -> Dict:
"""
Show a transaction timeline.
"""
headers = {
'Authorization': f'Bearer {self.api_key}'
}
url = f"{self.transaction_timeline_url}/{id_or_reference}"
response = requests.get(url, headers=headers)
if response.status_code == 200:
custom_response = {
"status_code": response.status_code,
"message": "Transaction timeline retrieved",
"response_from_api": response.json()
}
else:
error_message = response.text
raise APIError(response.status_code, error_message)
return custom_response
def get_total_transactions(self, per_page=50, page=1, from_date=None, to_date=None):
"""
Retrieve the total amount received on your account based on specified parameters.
:param per_page: Number of records to retrieve per page (default is 50).
:param page: Page number to retrieve (default is 1).
:param from_date: Start date for listing transactions in the format 'YYYY-MM-DDTHH:mm:ss.SSSZ'.
:param to_date: End date for listing transactions in the format 'YYYY-MM-DDTHH:mm:ss.SSSZ'.
:return: Customized response with the total amount received.
Format: {
"status_code": int,
"message": str,
"data": {
"total_amount": float
}
}
:raises APIError: If the API key is invalid or if there's an issue with the request.
"""
if not self.api_key:
raise APIError(401, "Invalid API Key")
headers = {
'Authorization': f'Bearer {self.api_key}'
}
params = {
'perPage': per_page,
'page': page,
'from': from_date,
'to': to_date
}
response = requests.get(self.transaction_totals_url, headers=headers, params=params)
if response.status_code == 200:
custom_response = {
"status_code": response.status_code,
"message": "Transaction totals retrieved successfully",
"response_from_api": response.json()
}
else:
error_message = response.text
raise APIError(response.status_code, error_message)
return custom_response
def download_csv(self, url, output_filename='exported_file.csv'):
response = requests.get(url)
response.raise_for_status()
with open(output_filename, 'wb') as file:
file.write(response.content)
print(f'File downloaded successfully: {output_filename}')
def export_transactions(self, per_page=50, page=1, filename="export.csv", **kwargs):
"""
Initiate the export and download the CSV file.
:param per_page: Number of records to retrieve per page (default is 50).
:param page: Page number to retrieve (default is 1).
:param filename: Optional filename for the exported CSV file.
:return: Customized response indicating the success of the export.
Format: {
"status_code": int,
"message": str,
"data": {
"exported_file": str # File path or URL
}
}
:raises APIError: If the API key is invalid, export initiation fails, or if there's an issue with the request.
"""
optional_kwargs = {key: value for key, value in kwargs.items() if key in self.EXPORT_OPTIONAL_PARAMS}
if not self.api_key:
raise APIError(401, "Invalid API key")
headers = {
'Authorization': f'Bearer {self.api_key}'
}
params = {
'perPage': per_page,
'page': page,
**optional_kwargs
}
try:
response = requests.get(self.export_transactions_url, headers=headers, params=params)
if response.status_code == 200:
data = response.json()
url_to_visit = data['data']['path']
# webbrowser.open(url_to_visit)
self.download_csv(url_to_visit, output_filename=filename)
custom_response = {
"status_code": response.status_code,
"message": f"Transactions exported successfully to {filename or url_to_visit}",
"data": {
"exported_file": filename or url_to_visit
}
}
return custom_response
except requests.exceptions.HTTPError as errh:
raise APIError(errh.response.status_code, f"HTTP Error: {errh}")
except requests.exceptions.ConnectionError as errc:
raise APIError(500, f"Error Connecting: {errc}")
except requests.exceptions.Timeout as errt:
raise APIError(500, f"Timeout Error: {errt}")
except requests.exceptions.RequestException as err:
raise APIError(500, f"An error occurred: {err}")
# Path: errors.py
class APIError(PaystackError):
"""Exception raised for errors in the Paystack API.
Attributes:
status_code -- the HTTP status code indicating the error
error_message -- a description of the error
"""
def __init__(self, status_code, error_message):
self.status_code = status_code
self.error_message = error_message
super().__init__(self.error_message)
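# --- Editor-added usage sketch (not part of the original repository) ---
# Illustrates how callers (including the tests below) can rely on APIError carrying both
# the HTTP status code and the message; `api` is assumed to be a Transaction instance.
def example_api_error_handling(api):
    try:
        api.verify_transaction(reference=None)
    except APIError as exc:
        # verify_transaction raises APIError(400, "Missing required parameter: reference").
        return exc.status_code, exc.error_message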
# Path: tests/test_transaction.py
import tracemalloc
import unittest
import secrets
import responses
from unittest.mock import Mock, patch
from paystackpyAPI.transaction import Transaction
from errors import APIError
from os import getenv
ID = ''
print(ID)
class TestPaystackAPI(unittest.TestCase):
def setUp(self):
# Set up any necessary test data or configurations
self.api = Transaction(api_key=getenv("PAYSTACK_KEY"))
def tearDown(self):
# Clean up any resources used for testing
responses.stop()
responses.reset()
tracemalloc.stop()
def test_non_200_response(self):
with responses.RequestsMock() as rsps:
rsps.add(
responses.POST,
self.api.paystack_initialization_url,
status=400,
json={"status": False, "message": "Invalid request"},
)
data = {
"email": "[email protected]",
"amount": 1000,
"reference": REFERENCE,
}
with self.assertRaises(APIError) as context:
self.api.initialize_transaction(**data)
self.assertEqual(context.exception.status_code, 400)
def test_initialize_transaction(self):
data = {
"email": "[email protected]",
"amount": 1000,
"reference": REFERENCE,
}
response = self.api.initialize_transaction(**data)
self.assertEqual(response["status_code"], 200)
self.assertEqual(response["message"], "Transaction initialized successfully")
print(response["message"])
def test_verify_transaction(self):
reference = REFERENCE
response = self.api.verify_transaction(reference)
ID = response["response_from_api"]['data']['id']
self.assertEqual(response["status_code"], 200)
self.assertEqual(response["message"], "Transaction details retrieved successfully")
def test_invalid_reference_key(self):
reference = "invalid_reference"
with self.assertRaises(APIError):
self.api.verify_transaction(reference)
def test_missing_email_initialize(self):
with self.assertRaises(APIError) as context:
self.api.initialize_transaction(amount=1000, email=None)
# self.assertEqual(context.exception.status_code, 400)
self.assertIn("Missing required parameters: email and/or amount", str(context.exception))
def test_missing_amount_initialize(self):
with self.assertRaises(APIError) as context:
self.api.initialize_transaction(amount=None, email="[email protected]")
# self.assertEqual(context.exception.status_code, 400)
self.assertIn("Missing required parameters: email and/or amount", str(context.exception))
def test_missing_reference_verify(self):
with self.assertRaises(APIError) as context:
self.api.verify_transaction(reference=None)
self.assertEqual(context.exception.status_code, 400)
self.assertIn("Missing required parameter: reference", str(context.exception))
def test_list_transactions(self):
response = self.api.list_transactions()
if response["status_code"] == 401:
self.assertEqual(response["message"], "Invalid API key")
elif response["status_code"] == 200:
self.assertEqual(response["status_code"], 200)
self.assertEqual(response["message"], "Transactions details below")
def test_with_str_id(self):
with self.assertRaises(APIError) as context:
self.api.fetch_transaction("wrong_id")
self.assertEqual(context.exception.status_code, 400)
self.assertIn("Transaction ID should be numeric", str(context.exception))
def test_with_int_id(self):
with self.assertRaises(APIError) as context:
self.api.fetch_transaction(123456789)
self.assertEqual(context.exception.status_code, 404)
self.assertIn("Transaction not found", str(context.exception))
print(str(context.exception))
def test_with_valid_id(self):
response = self.api.fetch_transaction(ID)
self.assertEqual(response["status_code"], 200)
self.assertEqual(response["message"], "Transaction Successfully fetched")
print(response["message"])
def test_authorize_transaction_with_missing_amount(self):
data = {
"email": "[email protected]",
"authorization_code": "AUTH_8dfhjjdt",
"amount": None
}
with self.assertRaises(APIError) as context:
self.api.charge_authorization(**data)
print(context.exception)
self.assertEqual(context.exception.status_code, 400)
self.assertIn("Missing required parameter amount", str(context.exception))
def test_authorize_transaction_with_missing_email(self):
data = {
"email": None,
"authorization_code": "AUTH_8dfhjjdt",
"amount": 2000
}
with self.assertRaises(APIError) as context:
| self.api.charge_authorization(**data) |
====REPOSITORY====
# Repo Name: UMass-Foundation-Model/genome
# Path: engine/nms.py
def nms(bounding_boxes, confidence_score, threshold):
# If no bounding boxes, return empty list
if len(bounding_boxes) == 0:
return [], []
# Bounding boxes
boxes = np.array(bounding_boxes)
# coordinates of bounding boxes
start_x = boxes[:, 0]
start_y = boxes[:, 1]
end_x = boxes[:, 2]
end_y = boxes[:, 3]
# Confidence scores of bounding boxes
score = np.array(confidence_score)
# Picked bounding boxes
picked_boxes = []
picked_score = []
# Compute areas of bounding boxes
areas = (end_x - start_x + 1) * (end_y - start_y + 1)
# Sort by confidence score of bounding boxes
order = np.argsort(score)
# Iterate bounding boxes
while order.size > 0:
# The index of largest confidence score
index = order[-1]
# Pick the bounding box with largest confidence score
picked_boxes.append(bounding_boxes[index])
picked_score.append(confidence_score[index])
# Compute ordinates of intersection-over-union(IOU)
x1 = np.maximum(start_x[index], start_x[order[:-1]])
x2 = np.minimum(end_x[index], end_x[order[:-1]])
y1 = np.maximum(start_y[index], start_y[order[:-1]])
y2 = np.minimum(end_y[index], end_y[order[:-1]])
# Compute areas of intersection-over-union
w = np.maximum(0.0, x2 - x1 + 1)
h = np.maximum(0.0, y2 - y1 + 1)
intersection = w * h
# Compute the ratio between intersection and union
ratio = intersection / (areas[index] + areas[order[:-1]] - intersection)
left = np.where(ratio < threshold)
order = order[left]
return picked_boxes, picked_score
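# --- Editor-added usage sketch (not part of the original repository) ---
# Calls the nms helper above on three made-up boxes in [x_min, y_min, x_max, y_max] format.
def example_nms():
    boxes = [[10, 10, 50, 50], [12, 12, 52, 52], [100, 100, 140, 140]]
    scores = [0.9, 0.6, 0.8]
    kept_boxes, kept_scores = nms(boxes, scores, 0.5)
    # The second box overlaps the first well above the 0.5 IoU threshold, so it is
    # suppressed; two boxes remain, ordered by descending confidence.
    return kept_boxes, kept_scores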
# Path: engine/viper/vision_models.py
class BaseModel(abc.ABC):
class ObjectDetector(BaseModel):
class DepthEstimationModel(BaseModel):
class CLIPModel(BaseModel):
class MaskRCNNModel(BaseModel):
class OwlViTModel(BaseModel):
class GLIPModel(BaseModel):
class OurGLIPDemo(GLIPDemo):
class TCLModel(BaseModel):
class GPT3Model(BaseModel):
class CodexModel(BaseModel):
class BLIPModel(BaseModel):
class SaliencyModel(BaseModel):
class XVLMModel(BaseModel):
def __init__(self, gpu_number):
def forward(self, *args, **kwargs):
def name(cls) -> str:
def list_processes(cls):
def __init__(self, gpu_number=0):
def forward(self, image: torch.Tensor):
def __init__(self, gpu_number=0, model_type='DPT_Large'):
def forward(self, image: torch.Tensor):
def __init__(self, gpu_number=0, version="ViT-L/14@336px"): # @336px
def _convert_image_to_rgb(self, image):
def get_clip_transforms_from_tensor(self, n_px=336):
def binary_score(self, image: torch.Tensor, prompt, negative_categories=None):
def clip_negatives(self, prompt_prefix, negative_categories=None):
def classify(self, image: Union[torch.Tensor, list], categories: list[str], return_index=True):
def compare(self, images: list[torch.Tensor], prompt, return_scores=False):
def forward(self, image, prompt, task='score', return_index=True, negative_categories=None, return_scores=False):
def __init__(self, gpu_number=0, threshold=config.detect_thresholds.maskrcnn):
def prepare_image(self, image):
def detect(self, images: torch.Tensor, return_labels=True):
def forward(self, image, return_labels=False):
def __init__(self, gpu_number=0, threshold=config.detect_thresholds.owlvit):
def forward(self, image: torch.Tensor, text: List[str], return_labels: bool = False):
def __init__(self, model_size='large', gpu_number=0, *args):
def __init__(self, dev, *args_demo):
def compute_prediction(self, original_image, original_caption, custom_entity=None):
def to_left_right_upper_lower(bboxes):
def to_xmin_ymin_xmax_ymax(bboxes):
def prepare_image(image):
def forward(self, image: torch.Tensor, obj: Union[str, list], return_labels: bool = False,
confidence_threshold=None):
def forward(self, *args, **kwargs):
def __init__(self, gpu_number=0):
def transform(self, image):
def prepare_image(self, image):
def binary_score(self, images: Union[list[torch.Tensor], torch.Tensor], prompt):
def classify(self, image, texts, return_index=True):
def forward(self, image, texts, task='classify', return_index=True):
def gpt3_cache_aux(fn_name, prompts, temperature, n_votes, result):
def __init__(self, gpu_number=0):
def process_answer(answer):
def get_union(lists):
def most_frequent(answers):
def get_qa(self, prompts, prompt_base: str=None) -> list[str]:
def get_qa_fn(self, prompt):
def get_general(self, prompts) -> list[str]:
def query_gpt3(self, prompt, model="text-davinci-003", max_tokens=16, logprobs=None, stream=False,
stop=None, top_p=1, frequency_penalty=0, presence_penalty=0):
def forward(self, prompt, process_name):
def list_processes(cls):
def codex_helper(extended_prompt):
def __init__(self, gpu_number=0):
def forward(self, prompt, input_type='image', prompt_file=None, base_prompt=None):
def forward_(self, extended_prompt):
def __init__(self, gpu_number=0, half_precision=config.blip_half_precision,
blip_v2_model_type=config.blip_v2_model_type):
def caption(self, image, prompt=None):
def pre_question(self, question):
def qa(self, image, question):
def qa_long(self, image, question):
def qa_test(self, image, question):
def forward(self, image, question=None, task='caption'):
def __init__(self, gpu_number=0,
path_checkpoint=f'{config.path_pretrained_models}/saliency_inspyrenet_plus_ultra'):
def forward(self, image):
def __init__(self, gpu_number=0,
path_checkpoint=f'{config.path_pretrained_models}/xvlm/retrieval_mscoco_checkpoint_9.pth'):
def pre_caption(caption, max_words):
def score(self, images, texts):
def binary_score(self, image, text, negative_categories):
def forward(self, image, text, task='score', negative_categories=None):
# Path: engine/viper/vision_processes.py
def make_fn(model_class, process_name, gpu_number):
"""
model_class.name and process_name will be the same unless the same model is used in multiple processes, for
different tasks
"""
# We initialize each one on a separate GPU, to make sure there are no out of memory errors
model_instance = model_class(gpu_number=gpu_number)
def _function(*args, **kwargs):
if process_name != model_class.name:
kwargs['process_name'] = process_name
if model_class.to_batch and not config.multiprocessing:
# Batchify the input. Model expects a batch. And later un-batchify the output.
args = [[arg] for arg in args]
kwargs = {k: [v] for k, v in kwargs.items()}
# The defaults that are not in args or kwargs, also need to listify
full_arg_spec = inspect.getfullargspec(model_instance.forward)
if full_arg_spec.defaults is None:
default_dict = {}
else:
default_dict = dict(zip(full_arg_spec.args[-len(full_arg_spec.defaults):], full_arg_spec.defaults))
non_given_args = full_arg_spec.args[1:][len(args):]
non_given_args = set(non_given_args) - set(kwargs.keys())
for arg_name in non_given_args:
kwargs[arg_name] = [default_dict[arg_name]]
try:
out = model_instance.forward(*args, **kwargs)
if model_class.to_batch and not config.multiprocessing:
out = out[0]
except Exception as e:
print(f'Error in {process_name} model:', e)
out = None
return out
return _function
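# --- Editor-added usage sketch (not part of the original repository) ---
# make_fn turns a vision model class into a plain callable, which is exactly how the
# wrapper classes in engine/api.py below register their models; GPU index 0 is an assumption.
def example_make_fn():
    blip_fn = make_fn(vision_models.BLIPModel, 'blip', 0)
    # blip_fn(*args, **kwargs) forwards to BLIPModel.forward, with batching handled internally.
    return blip_fn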
# Path: engine/api.py
import cv2
import os
import json
import torch
import openai
import functools
import numpy as np
import face_detection
import io, tokenize
import augly.image as imaugs
import time
from math import ceil, floor
from augly.utils.base_paths import EMOJI_DIR
from PIL import Image,ImageDraw,ImageFont,ImageFilter
from transformers import (ViltProcessor, ViltForQuestionAnswering,
OwlViTProcessor, OwlViTForObjectDetection,
MaskFormerFeatureExtractor, MaskFormerForInstanceSegmentation,
CLIPProcessor, CLIPModel, AutoProcessor, BlipForQuestionAnswering)
from diffusers import StableDiffusionInpaintPipeline
from .nms import nms
from engine.viper import vision_models
from engine.viper.vision_processes import make_fn
from torchvision import transforms
from typing import List, Union
from pynvml import nvmlDeviceGetHandleByIndex, nvmlInit, nvmlDeviceGetCount, nvmlDeviceGetMemoryInfo, nvmlShutdown
result = self.forward(transforms.ToTensor()(image), obj, return_labels, confidence_threshold)
if isinstance(result, tuple):
result = result[0]
boxes = result.tolist()
return boxes
class tcl_model():
def __init__(self):
print(f'Registering tcl model')
self.forward = make_fn(vision_models.TCLModel, 'tcl', choose_gpu())
def predict(self, image, texts, task='classify', return_index=True):
result = self.forward(transforms.ToTensor()(image), texts, task, return_index)
return result
class gpt3_model():
def __init__(self):
print(f'Registering gpt3 model')
self.forward = make_fn(vision_models.GPT3Model, 'gpt3', choose_gpu())
def predict(self, prompt, process_name='gpt3_qa'):
result = self.forward(prompt, process_name)
return result
class codex_model():
def __init__(self):
print(f'Registering codex model')
self.forward = make_fn(vision_models.CodexModel, 'codex', choose_gpu())
def predict(self, prompt, input_type='image', prompt_file=None, base_prompt=None):
result = self.forward(prompt, input_type, prompt_file, base_prompt)
return result
class blip_model():
def __init__(self):
print(f'Registering blip model')
self.forward = make_fn(vision_models.BLIPModel, 'blip', choose_gpu())
def predict(self, image, question=None, task='caption'):
result = self.forward(image, question, task)
return result
class saliency_model():
def __init__(self):
print(f'Registering saliency model')
self.forward = make_fn(vision_models.SaliencyModel, 'saliency', choose_gpu())
def predict(self, image):
result = self.forward(transforms.ToTensor()(image))
return result
class xvlm_model():
def __init__(self):
print(f'Registering xvlm model')
self.forward = make_fn(vision_models.XVLMModel, 'xvlm', choose_gpu())
def predict(self, image, text, negative_categories=None, task='binary'):
result = self.forward(transforms.ToTensor()(image), text, task, negative_categories)
return result
class face_detection_model():
def __init__(self):
print(f'Registering face_detection model')
self.model = face_detection.build_detector("DSFDDetector", confidence_threshold=.5, nms_iou_threshold=.3, device=torch.device(f"cuda:{choose_gpu()}"))
def predict(self, image):
with torch.no_grad():
faces = self.model.detect(np.array(image))
return faces
class segment_model():
def __init__(self):
print(f'Registering segment model')
self.device = f"cuda:{choose_gpu()}" if torch.cuda.is_available() else "cpu"
self.feature_extractor = MaskFormerFeatureExtractor.from_pretrained(
"facebook/maskformer-swin-base-coco")
self.model = MaskFormerForInstanceSegmentation.from_pretrained(
"facebook/maskformer-swin-base-coco").to(self.device)
self.model.eval()
def predict(self, img):
inputs = self.feature_extractor(images=img, return_tensors="pt")
inputs = {k:v.to(self.device) for k,v in inputs.items()}
with torch.no_grad():
outputs = self.model(**inputs)
outputs = self.feature_extractor.post_process_panoptic_segmentation(outputs)[0]
instance_map = outputs['segmentation'].cpu().numpy()
objs = []
print(outputs.keys())
for seg in outputs['segments_info']:
inst_id = seg['id']
label_id = seg['label_id']
category = self.model.config.id2label[label_id]
mask = (instance_map==inst_id).astype(float)
resized_mask = np.array(
Image.fromarray(mask).resize(
img.size,resample=Image.BILINEAR))
Y,X = np.where(resized_mask>0.5)
x1,x2 = np.min(X), np.max(X)
y1,y2 = np.min(Y), np.max(Y)
num_pixels = np.sum(mask)
objs.append(dict(
mask=resized_mask,
category=category,
box=[x1,y1,x2,y2],
inst_id=inst_id
))
return objs
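# --- Editor-added usage sketch (not part of the original repository) ---
# Runs the MaskFormer-based segment_model wrapper above on a PIL image; the file name is
# an illustrative placeholder.
def example_segment(image_path="example.jpg"):
    from PIL import Image
    model = segment_model()
    img = Image.open(image_path).convert("RGB")
    objs = model.predict(img)
    # Each entry holds a resized binary mask, a COCO category name, an [x1, y1, x2, y2] box
    # and the instance id, as assembled in predict() above.
    return [(obj["category"], obj["box"]) for obj in objs]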
class select_model():
def __init__(self):
| print(f'Registering select model') |
====REPOSITORY====
# Repo Name: dcermak/rpm-spec-language-server
# Path: rpm_spec_language_server/server.py
class RpmSpecLanguageServer(LanguageServer):
_CONDITION_KEYWORDS = [
# from https://github.com/rpm-software-management/rpm/blob/7d3d9041af2d75c4709cf7a721daf5d1787cce14/build/rpmbuild_internal.h#L58
"%endif",
"%else",
"%if",
"%ifarch",
"%ifnarch",
"%ifos",
"%ifnos",
"%include",
"%elifarch",
"%elifos",
"%elif",
]
def __init__(self) -> None:
super().__init__(name := "rpm_spec_language_server", metadata.version(name))
self.spec_files: dict[str, SpecSections] = {}
self.macros = Macros.dump()
self.auto_complete_data = create_autocompletion_documentation_from_spec_md(
spec_md_from_rpm_db() or ""
)
def macro_and_scriptlet_completions(
self, with_percent: bool
) -> list[CompletionItem]:
return (
[
CompletionItem(
label=key if with_percent else key[1:], documentation=value
)
for key, value in self.auto_complete_data.scriptlets.items()
]
+ [
CompletionItem(label=keyword if with_percent else keyword[1:])
for keyword in self._CONDITION_KEYWORDS
]
+ [
CompletionItem(label=f"%{macro.name}" if with_percent else macro.name)
for macro in self.macros
]
)
@property
def trigger_characters(self) -> list[str]:
return list(
set(
preamble_element[0]
for preamble_element in {
**self.auto_complete_data.preamble,
**self.auto_complete_data.dependencies,
}
).union({"%"})
)
def spec_sections_from_cache_or_file(
self, text_document: TextDocumentIdentifier | TextDocumentItem
) -> SpecSections | None:
if sections := self.spec_files.get((uri := text_document.uri), None):
return sections
if not (spec := spec_from_text_document(text_document)):
return None
self.spec_files[uri] = (sect := SpecSections.parse(spec))
return sect
# Path: rpm_spec_language_server/server.py
def create_rpm_lang_server() -> RpmSpecLanguageServer:
rpm_spec_server = RpmSpecLanguageServer()
def did_open_or_save(
server: RpmSpecLanguageServer,
param: DidOpenTextDocumentParams | DidSaveTextDocumentParams,
) -> None:
LOGGER.debug("open or save event")
if not (spec := spec_from_text_document(param.text_document)):
return None
LOGGER.debug("Saving parsed spec for %s", param.text_document.uri)
server.spec_files[param.text_document.uri] = SpecSections.parse(spec)
rpm_spec_server.feature(TEXT_DOCUMENT_DID_OPEN)(did_open_or_save)
rpm_spec_server.feature(TEXT_DOCUMENT_DID_SAVE)(did_open_or_save)
@rpm_spec_server.feature(TEXT_DOCUMENT_DID_CLOSE)
def did_close(
server: RpmSpecLanguageServer, param: DidCloseTextDocumentParams
) -> None:
if param.text_document.uri in server.spec_files:
del server.spec_files[param.text_document.uri]
@rpm_spec_server.feature(TEXT_DOCUMENT_DID_CHANGE)
def did_change(
server: RpmSpecLanguageServer, param: DidChangeTextDocumentParams
) -> None:
LOGGER.debug("Text document %s changed", (uri := param.text_document.uri))
if spec := spec_from_text(
server.workspace.text_documents[uri].source, os.path.basename(uri)
):
server.spec_files[uri] = SpecSections.parse(spec)
LOGGER.debug("Updated the spec for %s", uri)
@rpm_spec_server.feature(
TEXT_DOCUMENT_COMPLETION,
CompletionOptions(trigger_characters=rpm_spec_server.trigger_characters),
)
def complete_macro_name(
server: RpmSpecLanguageServer, params: CompletionParams
) -> CompletionList:
if not (
spec_sections := server.spec_sections_from_cache_or_file(
text_document=params.text_document
)
):
return CompletionList(is_incomplete=False, items=[])
trigger_char = (
None if params.context is None else params.context.trigger_character
)
# we are *not* in the preamble or a %package foobar section
# only complete macros
if not (
cur_sect := spec_sections.section_under_cursor(params.position)
) or not cur_sect.name.startswith("package"):
# also if we have no completion context, just send macros and if we
# have it, only send them if this was triggered by a %
LOGGER.debug(
"Sending completions for outside the package section with trigger_character %s",
trigger_char,
)
if (trigger_char and trigger_char == "%") or trigger_char is None:
return CompletionList(
is_incomplete=False,
items=server.macro_and_scriptlet_completions(
with_percent=trigger_char is None
),
)
return CompletionList(is_incomplete=False, items=[])
# we are in a package section => we can return preamble and dependency
# tags as completion items too
# return everything if we have no trigger character
if trigger_char is None:
LOGGER.debug(
"Sending completions for %package/preamble without a trigger_character"
)
return CompletionList(
is_incomplete=False,
items=[
CompletionItem(label=key, documentation=value)
for key, value in {
**server.auto_complete_data.dependencies,
**server.auto_complete_data.preamble,
}.items()
]
+ server.macro_and_scriptlet_completions(with_percent=True),
)
if trigger_char == "%":
LOGGER.debug("Sending completions for %package/premable triggered by %")
return CompletionList(
is_incomplete=False,
items=server.macro_and_scriptlet_completions(with_percent=False),
)
else:
LOGGER.debug(
"Sending completions for %package/premable triggered by %s",
trigger_char,
)
return CompletionList(
is_incomplete=False,
items=[
CompletionItem(label=key, documentation=value)
for key, value in {
**server.auto_complete_data.dependencies,
**server.auto_complete_data.preamble,
}.items()
if key.startswith(trigger_char)
],
)
@rpm_spec_server.feature(TEXT_DOCUMENT_DOCUMENT_SYMBOL)
def spec_symbols(
server: RpmSpecLanguageServer,
param: DocumentSymbolParams,
) -> list[DocumentSymbol] | list[SymbolInformation] | None:
if not (
spec_sections := server.spec_sections_from_cache_or_file(
text_document=param.text_document
)
):
return None
return spec_sections.to_document_symbols()
@rpm_spec_server.feature(TEXT_DOCUMENT_DEFINITION)
def find_macro_definition(
server: RpmSpecLanguageServer,
param: DefinitionParams,
) -> Location | list[Location] | list[LocationLink] | None:
# get the in memory spec if available
if not (
spec_sections := server.spec_sections_from_cache_or_file(
param.text_document
)
):
return None
macro_under_cursor = get_macro_under_cursor(
spec=spec_sections.spec, position=param.position, macros_dump=server.macros
)
if not macro_under_cursor:
return None
macro_name = (
macro_under_cursor
if isinstance(macro_under_cursor, str)
else macro_under_cursor.name
)
macro_level = (
MacroLevel.SPEC
if isinstance(macro_under_cursor, str)
else macro_under_cursor.level
)
def find_macro_define_in_spec(file_contents: str) -> list[re.Match[str]]:
"""Searches for the definition of the macro ``macro_under_cursor``
as it would appear in a spec file, i.e.: ``%global macro`` or
``%define macro``.
"""
regex = re.compile(
rf"^([\t \f]*)(%(?:global|define))([\t \f]+)({macro_name})",
re.MULTILINE,
)
return list(regex.finditer(file_contents))
def find_macro_in_macro_file(file_contents: str) -> list[re.Match[str]]:
"""Searches for the definition of the macro ``macro_under_cursor``
as it would appear in a rpm macros file, i.e.: ``%macro …``.
"""
regex = re.compile(
rf"^([\t \f]*)(%{macro_name})([\t \f]+)(\S+)", re.MULTILINE
)
return list(regex.finditer(file_contents))
def find_preamble_definition_in_spec(
file_contents: str,
) -> list[re.Match[str]]:
regex = re.compile(
rf"^([\t \f]*)({macro_name}):([\t \f]+)(\S*)",
re.MULTILINE | re.IGNORECASE,
)
if (m := regex.search(file_contents)) is None:
return []
return [m]
define_matches, file_uri = [], None
# macro is defined in the spec file
if macro_level == MacroLevel.GLOBAL:
if not (
define_matches := find_macro_define_in_spec(str(spec_sections.spec))
):
return None
file_uri = param.text_document.uri
# macro is something like %version, %release, etc.
elif macro_level == MacroLevel.SPEC:
if not (
define_matches := find_preamble_definition_in_spec(
str(spec_sections.spec)
)
):
return None
file_uri = param.text_document.uri
# the macro comes from a macro file
#
# We have now two options, either it is provided by a rpm package. Then
# there will be a package providing `rpm_macro($NAME)`. If that is the
# case, then we query the rpm db for all files provided by all packages
# providing this symbol and look for the macro definition in all files
# that are in %_rpmmacrodir (nothing else will be loaded by rpm)
#
# If this yields nothing, then the macro most likely comes from the
# builtin macros file of rpm (_should_ be in %_rpmconfigdir/macros) so
# we retry the search in that file.
elif macro_level == MacroLevel.MACROFILES:
MACROS_DIR = rpm.expandMacro("%_rpmmacrodir")
ts = rpm.TransactionSet()
# search in packages
for pkg in ts.dbMatch("provides", f"rpm_macro({macro_name})"):
for f in rpm.files(pkg):
if f.name.startswith(MACROS_DIR):
with open(f.name) as macro_file_f:
if define_matches := find_macro_in_macro_file(
macro_file_f.read(-1)
):
file_uri = f"file://{f.name}"
break
# we didn't find a match
# => the macro can be from %_rpmconfigdir/macros (no provides generated for it)
if not define_matches:
fname = rpm.expandMacro("%_rpmconfigdir") + "/macros"
with open(fname) as macro_file_f:
if define_matches := find_macro_in_macro_file(
macro_file_f.read(-1)
):
file_uri = f"file://{fname}"
if define_matches and file_uri:
return [
Location(
uri=file_uri,
range=Range(
start := position_from_match(define_match),
Position(
line=start.line,
character=(
start.character
+ define_match.end()
- define_match.start()
),
),
),
)
for define_match in define_matches
]
return None
@rpm_spec_server.feature(TEXT_DOCUMENT_HOVER)
def expand_macro(
server: RpmSpecLanguageServer, params: HoverParams
) -> Hover | None:
if spec_sections := server.spec_files.get(params.text_document.uri, None):
macro = get_macro_under_cursor(
spec=spec_sections.spec,
position=params.position,
macros_dump=server.macros,
)
else:
macro = get_macro_under_cursor(
text_document=params.text_document,
position=params.position,
macros_dump=server.macros,
)
# not a macro or an unknown macro => cannot show a meaningful hover
if not macro or isinstance(macro, str):
return None
if macro.level == MacroLevel.BUILTIN:
return Hover(contents="builtin")
try:
expanded_macro = Macros.expand(macro.body)
formatted_macro = f"```bash\n{expanded_macro}\n```"
contents = MarkupContent(kind=MarkupKind.Markdown, value=formatted_macro)
return Hover(contents)
except RPMException:
return Hover(contents=macro.body)
return rpm_spec_server
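# --- Editor-added usage sketch (not part of the original repository) ---
# Shows one way to launch the language server built above over stdio, mirroring how the
# test fixture below wires it to pipes via start_io(); using stdin/stdout here is an
# assumption about how an editor would typically spawn the server.
def example_run_server():
    import sys
    server = create_rpm_lang_server()
    server.start_io(sys.stdin.buffer, sys.stdout.buffer)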
# Path: tests/conftest.py
import asyncio
import os
import threading
import pytest
from typing import Generator
from lsprotocol.types import (
EXIT,
INITIALIZE,
SHUTDOWN,
ClientCapabilities,
InitializeParams,
)
from pygls.server import LanguageServer
from typeguard import install_import_hook
from rpm_spec_language_server.server import (
RpmSpecLanguageServer,
create_rpm_lang_server,
)
install_import_hook("rpm_spec_language_server")
class ClientServer:
# shamelessly stolen from
# https://github.com/openlawlibrary/pygls/blob/8f601029dcf3c7c91be7bf2d86a841a1598ce1f0/tests/ls_setup.py#L109
def __init__(self):
# Client to Server pipe
csr, csw = os.pipe()
# Server to client pipe
scr, scw = os.pipe()
# Setup Server
self.server = create_rpm_lang_server()
self.server_thread = threading.Thread(
name="Server Thread",
target=self.server.start_io,
args=(os.fdopen(csr, "rb"), os.fdopen(scw, "wb")),
)
self.server_thread.daemon = True
# Setup client
self.client = LanguageServer("client", "v1", asyncio.new_event_loop())
self.client_thread = threading.Thread(
name="Client Thread",
target=self.client.start_io,
args=(os.fdopen(scr, "rb"), os.fdopen(csw, "wb")),
)
self.client_thread.daemon = True
@classmethod
def decorate(cls):
return pytest.mark.parametrize("client_server", [cls], indirect=True)
def start(self) -> None:
self.server_thread.start()
self.server.thread_id = self.server_thread.ident
self.client_thread.start()
self.initialize()
def stop(self) -> None:
shutdown_response = self.client.lsp.send_request(SHUTDOWN).result()
assert shutdown_response is None
self.client.lsp.notify(EXIT)
self.server_thread.join()
self.client._stop_event.set()
try:
self.client.loop._signal_handlers.clear() # HACK ?
except AttributeError:
pass
self.client_thread.join()
# @retry_stalled_init_fix_hack()
def initialize(self) -> None:
timeout = None if "DISABLE_TIMEOUT" in os.environ else 1
response = self.client.lsp.send_request(
INITIALIZE,
InitializeParams(
process_id=12345, root_uri="file://", capabilities=ClientCapabilities()
),
).result(timeout=timeout)
assert response.capabilities is not None
def __iter__(self) -> Generator[LanguageServer, None, None]:
yield self.client
yield self.server
CLIENT_SERVER_T = Generator[tuple[LanguageServer, RpmSpecLanguageServer], None, None]
@pytest.fixture
def client_server() -> CLIENT_SERVER_T:
cs = ClientServer()
| cs.start() |
====REPOSITORY====
# Repo Name: ziqi-zhang/TAOISM
# Path: python/layers/activation.py
class SecretActivationLayer(SecretNonlinearLayer):
def __init__(
self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,
manually_register_prev=False, manually_register_next=False, merge_own_tensors=False
):
super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)
self.Shapefortranspose = None
self.link_prev = link_prev
self.link_next = link_next
self.manual_register_prev = manually_register_prev
self.manual_register_next = manually_register_next
self.merge_own_tensors = merge_own_tensors
def init_shape(self):
self.InputShape = self.PrevLayer.get_output_shape()
self.OutputShape = self.InputShape
self.HandleShape = self.InputShape
def init(self, start_enclave=True):
TensorLoader.init(self, start_enclave)
def link_tensors(self):
if self.merge_own_tensors:
self.manually_link_owned_two_tensors("input", "output")
super().link_tensors()
def get_output_shape(self):
return self.OutputShape
def generate_tensor_name_list(self, force=False):
if not force and self.tensor_name_list:
return
if self.sid == 2:
self.tensor_name_list = {}
return
if len(self.InputShape) == 4:
# self.Shapefortranspose = [int(round(((self.InputShape[0] * self.InputShape[1] * self.InputShape[2] * self.InputShape[3])/262144+1/2))), 262144, 1, 1]
self.Shapefortranspose = [int(round(((self.InputShape[0] * self.InputShape[1] * self.InputShape[2] * self.InputShape[3])/602112+1/2))), 602112, 1, 1]
else:
self.Shapefortranspose = self.InputShape
NeededTensorNames = [("output", self.OutputShape, None),
("handle", self.HandleShape, None),
# ("DerInput", self.InputShape, None),
("input", self.InputShape, None),
# ("DerOutput", self.OutputShape, None),
("inputtrans", self.Shapefortranspose, None),
("outputtrans", self.Shapefortranspose, None),
]
self.tensor_name_list = NeededTensorNames
def forward(self):
with NamedTimerInstance(f"S{self.sid}: {self.LayerName} Forward", verbose_level=VerboseLevel.LAYER):
with NamedTimerInstance(f" S{self.sid}: {self.LayerName} Input Preprocess", verbose_level=VerboseLevel.LAYER):
self.forward_tensor_transfer()
# self.requires_grad_on_cpu("input")
if self.EnclaveMode == ExecutionModeOptions.Enclave:
# if self.PrevLayer.EnclaveMode is not ExecutionModeOptions.Enclave:
# with NamedTimerInstance(f" S{self.sid}: {self.LayerName} Input Preprocess", verbose_level=VerboseLevel.LAYER):
# self.transfer_enclave_to_cpu("input")
# if torch.sum(self.get_cpu("input").abs()) == 0:
# raise RuntimeError(f"{self.LayerName}: SGX input not load")
# self.transfer_cpu_to_enclave("input")
with NamedTimerInstance(f" S{self.sid}: {self.LayerName} ForwardFunc", verbose_level=VerboseLevel.LAYER):
self.ForwardFunc("input", "output")
elif self.EnclaveMode == ExecutionModeOptions.CPU:
if self.PrevLayer.EnclaveMode is not ExecutionModeOptions.CPU and torch.sum(self.get_cpu("input").abs()) == 0:
raise RuntimeError(f"{self.LayerName}: SGX input not load")
self.set_cpu("output", self.ForwardFunc(self.get_cpu("input")))
elif self.EnclaveMode == ExecutionModeOptions.GPU:
if self.PrevLayer.EnclaveMode is not ExecutionModeOptions.GPU and torch.sum(self.get_gpu("input").abs()) == 0:
raise RuntimeError(f"{self.LayerName}: SGX input not load")
self.set_gpu("output", self.ForwardFunc(self.get_gpu("input")))
else:
raise RuntimeError
def backward(self):
with NamedTimerInstance(f"S{self.sid}: {self.LayerName} Backward", verbose_level=VerboseLevel.LAYER):
self.backward_tensor_transfer()
if self.is_enclave_mode:
self.BackwardFunc("output", "DerOutput", "DerInput")
else:
self.set_cpu("DerInput", self.get_cpu("output").grad_fn(self.get_cpu("DerOutput")))
# Path: python/sgx_net.py
def conv2d_op(w, x, is_div=True):
def base_conv2d(sub_x, sub_w):
def sum_of_div(best_shape):
def conv2d_input_grad_op(w, dy):
def conv2d_weight_grad_op(dy, x, is_div=True):
def base_conv2d_weight_grad_op(sub_dy, sub_x):
def sum_of_div(best_shape):
def matmul_op(w, x):
def matmul_input_grad_op(w, dy):
def matmul_weight_grad_op(dy, x):
def set_tensor_name_maybe_quantized(name, quantized):
def secret_op_class_factory(sid, target_op_name):
def __init__(self, name):
def target_op(self, a, b):
def __init__(self, sid, nn_name):
def set_layers(self, layers):
def execute_for_each_layer(self, func, reverse=False):
def classifier_output(self):
def get_loss(self):
def forward_with_time(self):
def run_forward(layer):
def forward(self):
def run_forward(layer):
def backward(self):
def run_backward(layer):
def plain_forward(self):
def plain_backward(self):
def show_plain_error(self):
def __init__(self, sid):
def set_layers(self, layers):
def generate_tensor_name_list(self, force=False):
def update_params(self, test_with_ideal=False):
def update_params_in_layer(self, layer, test_with_ideal=False):
def ideal_update_params_with_name(self, layer, der_name, param_name, shape):
def warming_up_cuda():
def init_communicate(rank, master_address, master_port, backend='gloo'):
class SecretNeuralNetwork(TensorLoader):
class SgdOptimizer(TensorLoader):
# Path: python/tensor_loader.py
class TensorLoader(EnclaveInterface):
def __init__(self):
super().__init__()
self.sid = -1
self.tensor_name_list = []
self.encryption_tensor_name_list = {}
self.RandomVarName = None
self.ShareVarName = None
self.ShareTuple = None
def init(self, start_enclave=True):
if start_enclave:
print("Initializing sid: %d" % self.sid)
self.init_enclave()
self.generate_tensor_name_list()
# if hasattr(self, "LayerName") and self.LayerName == "Layer1.0.main.relu2":
# st()
self.init_enclave_tensors()
self.init_cpu_tensor()
self.init_encryption_tensor()
def generate_tensor_name_list(self, force=False):
return
def link_tensors(self):
pass
def init_enclave_tensors(self):
self.generate_tensor_name_list()
for TensorName, shape, SeedList in self.tensor_name_list:
if shape is None:
raise ValueError("The shape is None. Please setup the shape before init_enclave_tensor")
# print(f"TensorLoader init {TensorName}, {shape}")
self.init_enclave_tensor(TensorName, shape)
if SeedList is None:
continue
for seed in SeedList:
self.set_seed(TensorName, seed)
def set_cpu(self, name, t):
# print("---", name, self.get_tag(name))
GlobalTensor.set_cpu(self.get_tag(name), t)
def set_gpu(self, name, t):
GlobalTensor.set_gpu(self.get_tag(name), t)
def set_encryption(self, name, t):
GlobalTensor.set_encryption(self.get_tag(name), t)
def get_cpu(self, name):
return GlobalTensor.get_cpu(self.get_tag(name))
def get_gpu(self, name):
return GlobalTensor.get_gpu(self.get_tag(name))
def get_encryption(self, name):
return GlobalTensor.get_encryption(self.get_tag(name))
def generate_cpu_tensor(self, name, shape):
self.set_cpu(name, torch.zeros(shape).type(SecretConfig.dtypeForCpuOp))
# self.CpuTensors[name] = torch.zeros(shape).type(SecretConfig.dtypeForCpuOp)
def transfer_cpu_to_gpu(self, name):
self.set_gpu(name, self.get_cpu(name).cuda(non_blocking=True).type(SecretConfig.dtypeForCudaMm))
# self.GpuTensors[name] = self.CpuTensors[name].cuda(non_blocking=True).type(SecretConfig.dtypeForCudaMm)
def transfer_gpu_to_cpu(self, name):
cpu_tensor = self.get_cpu(name)
gpu_tensor = self.get_gpu(name)
cpu_tensor.copy_(gpu_tensor.type(SecretConfig.dtypeForCpuOp))
def transfer_enclave_to_cpu(self, name):
self.from_enclave(name, self.get_cpu(name))
def transfer_cpu_to_enclave(self, name):
self.set_tensor(name, self.get_cpu(name))
def init_cpu_tensor(self):
self.generate_tensor_name_list()
for TensorName, shape, _ in self.tensor_name_list:
self.generate_cpu_tensor(TensorName, shape)
def init_encryption_tensor(self):
self.generate_tensor_name_list()
for name, shape in self.encryption_tensor_name_list:
GlobalTensor.init_encrypted_tensor(self.get_tag(name), shape)
# self.EncrtyptedTensors[name] = self.CreateEncryptTorch(shape)
def set_tensor_cpu_enclave(self, name, tensor):
# GlobalTensor.SetNamedTensor(self.GetTag(tag), tensor)
self.set_cpu(name, tensor)
self.set_tensor(name, tensor)
# print("Set cpu enclave: ", tensor[0,:10])
def set_tensor_cpu_gpu_enclave(self, name, tensor):
# GlobalTensor.SetNamedTensor(self.GetTag(tag), tensor)
self.set_cpu(name, tensor)
self.set_tensor(name, tensor)
self.set_gpu(name, tensor)
# print("Set cpu enclave: ", tensor[0,:10])
def from_enclave(self, name, tensor):
self.get_tensor(name, tensor)
# def generate_enclave_tensor(self, name):
# if name in self.RandomVarName:
# return self.async_get_random(name, self.get_cpu(name))
# elif name in self.ShareVarName:
# original, seed = self.ShareTuple[name]
# return self.async_get_share(original, self.get_cpu(name), seed)
# else:
# raise Exception("Doesnt how to generate this tensor")
# Path: python/utils/timer_utils.py
class NamedTimerInstance(object):
def __init__(self, name, verbose_level=VerboseLevel.EVERY):
self.name = name
self.verbose_level = verbose_level
def __enter__(self):
return NamedTimer.start(self.name, verbose_level=self.verbose_level)
...
def __exit__(self, *args):
NamedTimer.end(self.name)
...
# Path: python/utils/timer_utils.py
class VerboseLevel(IntEnum):
EVERY = 1
LAYER = 2
RUN = 3
EPOCH = 4
# Path: python/utils/torch_utils.py
def compare_expected_actual(expected, actual, show_where_err=False, get_relative=False, verbose=False, show_values=False):
def purify(x):
# return torch.tensor(x)
res = x
# if not (isinstance(x, torch.Tensor) or isinstance(x, torch.Variable)):
if not (isinstance(x, torch.Tensor) ):
res = torch.tensor(x)
# return x.detach().numpy()
return res.type(torch.float).to("cpu")
expected = purify(expected)
actual = purify(actual)
if show_values:
print("expected:", expected[0, 0])
print("actual:", actual[0, 0])
avg_abs_diff = torch.mean(torch.abs(expected - actual)).item()
res = avg_abs_diff
if show_where_err:
show_indices = torch.abs(expected - actual) / torch.abs(expected) > 0.5
# show_indices = (expected != actual)
print("error indices: ", np.where(show_indices.cpu()))
print("expected values:", expected[show_indices])
print("difference:", (expected - actual)[show_indices])
if get_relative:
tmp_expected, tmp_actual = expected[expected != 0], actual[expected != 0]
relative_diff = torch.abs(tmp_expected - tmp_actual) / torch.abs(tmp_expected)
relative_avg_diff = torch.mean(torch.abs(tmp_actual - tmp_expected)) / torch.mean(torch.abs(tmp_expected))
Error = namedtuple("Error", ("AvgAbsDiff", "RelAvgDiff", "AvgRelDiff", "StdRelDiff"))
res = Error(avg_abs_diff, relative_avg_diff.item(), torch.mean(relative_diff).item(), torch.std(relative_diff).item())
if verbose:
print(res)
return res
# Path: python/utils/basic_utils.py
class ExecutionModeOptions(Enum):
Enclave = 1
CPU = 2
GPU = 3
# Path: python/global_config.py
class SecretConfig(object):
worldSize = 3
PrimeLimit = (1 << 21) - 9
dtypeForCpuMod = torch.float32
dtypeForCudaMm = torch.float64
dtypeForCpuOp = torch.float32
dtypeForSave = torch.float32
stateless_logfile = "stateless.log"
stateless_logger_name = "stateless_logger"
is_comptue_gpu = True
# Path: python/layers/batch_norm_2d.py
import numpy as np
import torch
from pdb import set_trace as st
from python.layers.activation import SecretActivationLayer
from python.sgx_net import LearnableParamTuple
from python.tensor_loader import TensorLoader
from python.utils.timer_utils import NamedTimerInstance, VerboseLevel
from python.utils.torch_utils import compare_expected_actual
from python.utils.basic_utils import ExecutionModeOptions
from python.global_config import SecretConfig
# ("DerOutput", self.OutputShape, None),
("weight", self.WeightShape, None),
# ("DerWeight", self.WeightShape, None),
("bias", self.WeightShape, None),
# ("DerBias", self.WeightShape, None),
("RunMean", self.WeightShape, None),
("CurMean", self.WeightShape, None),
("RunVar", self.WeightShape, None),
("CurVar", self.WeightShape, None),
("mu", self.InputShape, None),
]
else:
NeededTensorNames = [
("output", self.OutputShape, None),
# ("DerInput", self.InputShape, None),
("input", self.InputShape, None),
("weight", self.WeightShape, None),
# ("DerWeight", self.WeightShape, None),
("bias", self.WeightShape, None),
# ("DerBias", self.WeightShape, None),
# ("DerOutput", self.OutputShape, None)
]
self.tensor_name_list = NeededTensorNames
# def forward(self):
# if self.sid == 2:
# return
# with NamedTimerInstance(f"S{self.sid}: {self.LayerName} Forward", verbose_level=VerboseLevel.LAYER):
# if self.is_enclave_mode:
# self.forward_tensor_transfer()
# self.batchnorm_forward(self.LayerName, int(False))
# else:
# self.forward_tensor_transfer()
# self.requires_grad_on_cpu("input")
# self.ForwardFunc.bias.data.copy_(self.get_cpu("bias"))
# self.ForwardFunc.weight.data.copy_(self.get_cpu("weight"))
# self.ForwardFunc.running_mean.data.copy_(self.get_cpu("RunMean"))
# # running_var of PlainFunc is ^2 of that in the enclave
# enclave_running_var = self.get_cpu("RunVar")
# self.ForwardFunc.running_var.data.copy_(enclave_running_var)
# self.set_cpu("output", self.ForwardFunc(self.get_cpu("input")))
def forward(self):
with NamedTimerInstance(f"S{self.sid}: {self.LayerName} Forward", verbose_level=VerboseLevel.LAYER):
if self.EnclaveMode == ExecutionModeOptions.Enclave:
# if self.LayerName == "Layer2.0.downsample.bn":
# st()
with NamedTimerInstance(f" S{self.sid}: {self.LayerName} Input Preprocess", verbose_level=VerboseLevel.LAYER):
self.forward_tensor_transfer()
with NamedTimerInstance(f" S{self.sid}: {self.LayerName} batchnorm_forward", verbose_level=VerboseLevel.LAYER):
self.batchnorm_forward(self.LayerName, int(False))
elif self.EnclaveMode == ExecutionModeOptions.CPU:
self.forward_tensor_transfer()
self.ForwardFunc.bias.data.copy_(self.get_cpu("bias"))
self.ForwardFunc.weight.data.copy_(self.get_cpu("weight"))
self.ForwardFunc.running_mean.data.copy_(self.get_cpu("RunMean"))
# running_var of PlainFunc is ^2 of that in the enclave
enclave_running_var = self.get_cpu("RunVar")
self.ForwardFunc.running_var.data.copy_(enclave_running_var)
self.set_cpu("output", self.ForwardFunc(self.get_cpu("input")))
elif self.EnclaveMode == ExecutionModeOptions.GPU:
self.forward_tensor_transfer()
self.ForwardFunc.bias.data.copy_(self.get_gpu("bias"))
self.ForwardFunc.weight.data.copy_(self.get_gpu("weight"))
self.ForwardFunc.running_mean.data.copy_(self.get_gpu("RunMean"))
# running_var of PlainFunc is ^2 of that in the enclave
enclave_running_var = self.get_gpu("RunVar")
self.ForwardFunc.running_var.data.copy_(enclave_running_var)
# st()
# print(self.get_gpu("input")[0,0,0])
self.set_gpu("output", self.ForwardFunc(self.get_gpu("input").type(SecretConfig.dtypeForCpuOp)))
def backward(self):
raise NotImplementedError
if self.sid == 2:
return
with NamedTimerInstance(f"S{self.sid}: {self.LayerName} Backward", verbose_level=VerboseLevel.LAYER):
if self.is_enclave_mode:
self.backward_tensor_transfer()
self.batchnorm_backward(self.LayerName)
else:
self.backward_tensor_transfer()
BackwardInput, BackwardWeight, BackwardBias = self.get_cpu("output").grad_fn(self.get_cpu("DerOutput"))
self.set_cpu("DerInput", BackwardInput.data)
self.set_cpu("DerWeight", BackwardWeight.data)
self.set_cpu("DerBias", BackwardBias.data)
if list(self.get_cpu("DerWeight").shape) != self.WeightShape:
real_shape = self.get_cpu("DerWeight").shape
ideal_shape = self.WeightShape
raise ValueError(
f"DerWeight is not of shape self.AffineShape: real: {real_shape}, ideal: {ideal_shape}")
if list(self.get_cpu("DerBias").shape) != self.WeightShape:
raise ValueError("DerBias is not of shape self.AffineShape")
def plain_forward(self, NeedBackward=False):
if self.sid == 2:
return
if self.EnclaveMode in [ExecutionModeOptions.Enclave, ExecutionModeOptions.GPU]:
self.make_sure_cpu_is_latest("input")
self.make_sure_cpu_is_latest("bias")
self.make_sure_cpu_is_latest("weight")
self.requires_grad_on_cpu("input")
self.PlainFunc.bias.data.copy_(self.get_cpu("bias"))
self.PlainFunc.weight.data.copy_(self.get_cpu("weight"))
self.PlainFunc.running_mean.data.copy_(self.get_cpu("RunMean"))
# self.PlainFunc.running_var.data.copy_(self.get_cpu("RunVar"))
# running_var of PlainFunc is ^2 of that in the enclave
enclave_running_var = self.get_cpu("RunVar")
self.PlainFunc.running_var.data.copy_(enclave_running_var)
else:
self.make_sure_cpu_is_latest("input")
self.requires_grad_on_cpu("input")
with NamedTimerInstance(f"S{self.sid}: {self.LayerName} PlainForward"):
torch.set_num_threads(1)
self.PlainForwardResult = self.PlainFunc(self.get_cpu("input"))
torch.set_num_threads(4)
def plain_backward(self):
| if self.sid == 2: |
====REPOSITORY====
# Repo Name: NVlabs/M2T2
# Path: m2t2/dataset_utils.py
class NormalizeInverse(transforms.Normalize):
def __init__(self, mean, std):
def __call__(self, tensor):
def depth_to_xyz(depth, intrinsics):
def jitter_gaussian(xyz, std, clip):
def sample_points(xyz, num_points):
Z = depth
X = (u - cx) * (Z / fx)
Y = (v - cy) * (Z / fy)
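# Illustrative sketch (not part of the repository listing): the three formulas
# above are the standard pinhole back-projection used by depth_to_xyz. A minimal
# self-contained version is shown below; the (fx, fy, cx, cy) intrinsics layout
# is an assumption for illustration and may differ from the real signature,
# which takes a packed `intrinsics` argument.
import numpy as np

def depth_to_xyz_sketch(depth, fx, fy, cx, cy):
    """Back-project an HxW depth map into an HxWx3 point map."""
    h, w = depth.shape
    v, u = np.meshgrid(np.arange(h), np.arange(w), indexing='ij')  # pixel rows/cols
    z = depth
    x = (u - cx) * (z / fx)
    y = (v - cy) * (z / fy)
    return np.stack([x, y, z], axis=-1)

# Example: a flat plane 1 m in front of the camera.
# xyz = depth_to_xyz_sketch(np.ones((480, 640)), fx=600.0, fy=600.0, cx=320.0, cy=240.0)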
# Path: m2t2/m2t2.py
class M2T2(nn.Module):
def __init__(
self,
backbone: nn.Module,
transformer: nn.Module,
object_encoder: nn.Module = None,
grasp_mlp: nn.Module = None,
set_criterion: nn.Module = None,
grasp_criterion: nn.Module = None,
place_criterion: nn.Module = None
):
super(M2T2, self).__init__()
self.backbone = backbone
self.object_encoder = object_encoder
self.transformer = transformer
self.grasp_mlp = grasp_mlp
self.set_criterion = set_criterion
self.grasp_criterion = grasp_criterion
self.place_criterion = place_criterion
@classmethod
def from_config(cls, cfg):
args = {}
args['backbone'] = PointNet2MSG.from_config(cfg.scene_encoder)
channels = args['backbone'].out_channels
obj_channels = None
if cfg.contact_decoder.num_place_queries > 0:
args['object_encoder'] = PointNet2MSGCls.from_config(
cfg.object_encoder
)
obj_channels = args['object_encoder'].out_channels
args['place_criterion'] = PlaceCriterion.from_config(
cfg.place_loss
)
args['transformer'] = ContactDecoder.from_config(
cfg.contact_decoder, channels, obj_channels
)
if cfg.contact_decoder.num_grasp_queries > 0:
args['grasp_mlp'] = ActionDecoder.from_config(
cfg.action_decoder, args['transformer']
)
matcher = HungarianMatcher.from_config(cfg.matcher)
args['set_criterion'] = SetCriterion.from_config(
cfg.grasp_loss, matcher
)
args['grasp_criterion'] = GraspCriterion.from_config(
cfg.grasp_loss
)
return cls(**args)
def forward(self, data, cfg):
scene_feat = self.backbone(data['inputs'])
object_inputs = data['object_inputs']
object_feat = {}
if self.object_encoder is not None:
object_feat = self.object_encoder(object_inputs)
if 'task_is_place' in data:
for key, val in object_feat['features'].items():
object_feat['features'][key] = (
val * data['task_is_place'].view(
data['task_is_place'].shape[0], 1, 1
)
)
lang_tokens = data.get('lang_tokens')
embedding, outputs = self.transformer(
scene_feat, object_feat, lang_tokens
)
losses = {}
if self.place_criterion is not None:
losses, stats = self.place_criterion(outputs, data)
outputs[-1].update(stats)
if self.set_criterion is not None:
set_losses, outputs = self.set_criterion(outputs, data)
losses.update(set_losses)
else:
outputs = outputs[-1]
if self.grasp_mlp is not None:
mask_features = scene_feat['features'][
self.transformer.mask_feature
]
obj_embedding = [emb[idx] for emb, idx in zip(
embedding['grasp'], outputs['matched_idx']
)]
confidence = [
mask.sigmoid() for mask in outputs['matched_grasping_masks']
]
grasp_outputs = self.grasp_mlp(
data['points'], mask_features, confidence,
cfg.mask_thresh, obj_embedding, data['grasping_masks']
)
outputs.update(grasp_outputs)
contact_losses = self.grasp_criterion(outputs, data)
losses.update(contact_losses)
return outputs, losses
def infer(self, data, cfg):
scene_feat = self.backbone(data['inputs'])
object_feat = self.object_encoder(data['object_inputs'])
if 'task_is_place' in data:
for key in object_feat['features']:
object_feat['features'][key] = (
object_feat['features'][key] * data['task_is_place'].view(
data['task_is_place'].shape[0], 1, 1
)
)
lang_tokens = data.get('lang_tokens')
embedding, outputs = self.transformer(
scene_feat, object_feat, lang_tokens
)
outputs = outputs[-1]
if 'place' in embedding and embedding['place'].shape[1] > 0:
cam_pose = None if cfg.world_coord else data['cam_pose']
placement_outputs = infer_placements(
data['points'], outputs['placement_masks'],
data['bottom_center'], data['ee_pose'],
cam_pose, cfg.mask_thresh, cfg.placement_height
)
outputs.update(placement_outputs)
outputs['placement_masks'] = (
outputs['placement_masks'].sigmoid() > cfg.mask_thresh
)
if 'grasp' in embedding and embedding['grasp'].shape[1] > 0:
masks = outputs['grasping_masks'].sigmoid() > cfg.mask_thresh
mask_features = scene_feat['features'][
self.transformer.mask_feature
]
if 'objectness' in outputs:
objectness = outputs['objectness'].sigmoid()
object_ids = [
torch.where(
(score > cfg.object_thresh) & mask.sum(dim=1) > 0
)[0]
for score, mask in zip(objectness, masks)
]
outputs['objectness'] = [
score[idx] for score, idx in zip(objectness, object_ids)
]
confidence = [
logits.sigmoid()[idx]
for logits, idx in zip(outputs['grasping_masks'], object_ids)
]
outputs['grasping_masks'] = [
mask[idx] for mask, idx in zip(masks, object_ids)
]
obj_embedding = [emb[idx] for emb, idx in zip(
embedding['grasp'], object_ids
)]
else:
obj_embedding = embedding['grasp']
confidence = [
logits.sigmoid() for logits in outputs['grasping_masks']
]
grasp_outputs = self.grasp_mlp(
data['points'], mask_features, confidence,
cfg.mask_thresh, obj_embedding
)
outputs.update(grasp_outputs)
return outputs
# Path: m2t2/pointnet2_utils.py
class FurthestPointSampling(Function):
class GatherOperation(Function):
class ThreeNN(Function):
class ThreeInterpolate(Function):
class GroupingOperation(Function):
class BallQuery(Function):
class QueryAndGroup(nn.Module):
class GroupAll(nn.Module):
def forward(ctx, xyz, npoint):
def backward(ctx, grad_out):
def forward(ctx, features, idx):
def backward(ctx, grad_out):
def forward(ctx, unknown, known):
def backward(ctx, grad_dist, grad_idx):
def forward(ctx, features, idx, weight):
def backward(ctx, grad_out):
def forward(ctx, features, idx):
def backward(ctx, grad_out):
def forward(ctx, radius, nsample, xyz, new_xyz):
def backward(ctx, grad_out):
def __init__(self, radius, nsample, use_xyz=True):
def forward(self, xyz, new_xyz, features=None):
def __init__(self, use_xyz=True):
def forward(self, xyz, new_xyz, features=None):
N = features.size(2)
N = features.size(2)
# Path: m2t2/meshcat_utils.py
def create_visualizer(clear=True):
print(
"Waiting for meshcat server... have you started a server? Run `meshcat-server` to start a server"
)
vis = meshcat.Visualizer(zmq_url="tcp://127.0.0.1:6000")
if clear:
vis.delete()
return vis
# Path: m2t2/meshcat_utils.py
def visualize_pointcloud(vis, name, pc, color=None, transform=None, **kwargs):
"""
Args:
vis: meshcat visualizer object
name: str
pc: Nx3 or HxWx3
        color: (optional) same shape as pc with values on a [0, 255] scale, or just an rgb tuple
transform: (optional) 4x4 homogeneous transform
"""
if pc.ndim == 3:
pc = pc.reshape(-1, pc.shape[-1])
if color is not None:
if isinstance(color, list):
color = np.array(color)
color = np.array(color)
# Resize the color np array if needed.
if color.ndim == 3:
color = color.reshape(-1, color.shape[-1])
if color.ndim == 1:
color = np.ones_like(pc) * np.array(color)
# Divide it by 255 to make sure the range is between 0 and 1,
color = color.astype(np.float32) / 255
else:
color = np.ones_like(pc)
vis[name].set_object(
meshcat.geometry.PointCloud(position=pc.T, color=color.T, **kwargs)
)
if transform is not None:
vis[name].set_transform(transform)
# Path: m2t2/meshcat_utils.py
def visualize_grasp(vis, name, transform, color=[255, 0, 0], **kwargs):
grasp_vertices = load_grasp_points()
vis[name].set_object(
g.Line(
g.PointsGeometry(grasp_vertices),
g.MeshBasicMaterial(color=rgb2hex(tuple(color)), **kwargs),
)
)
vis[name].set_transform(transform.astype(np.float64))
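# Illustrative usage sketch (not from the repository): it assumes a meshcat
# server is already running on tcp://127.0.0.1:6000, as create_visualizer
# expects, and only uses the helpers defined above. The extra `size` and
# `linewidth` kwargs are forwarded via **kwargs, mirroring the repository's
# own call sites.
import numpy as np
from m2t2.meshcat_utils import create_visualizer, visualize_pointcloud, visualize_grasp

def _demo_meshcat():
    vis = create_visualizer(clear=True)
    pc = np.random.uniform(-0.5, 0.5, size=(2048, 3))
    visualize_pointcloud(vis, 'demo/cloud', pc, color=[255, 0, 0], size=0.01)
    visualize_grasp(vis, 'demo/grasp', np.eye(4), color=[0, 255, 0], linewidth=2)

# _demo_meshcat()  # uncomment once `meshcat-server` is running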
# Path: m2t2/rlbench_utils.py
def load_image(episode_dir, camera, meta_data, frame_id):
def within_bound(demo, cameras, bounds):
def gripper_pose_from_rlbench(pose, gripper_depth=0.1034):
def gripper_pose_to_rlbench(pose, gripper_depth=0.1034):
# Path: m2t2/train_utils.py
def to_gpu(dic):
for key in dic:
if isinstance(dic[key], torch.Tensor):
dic[key] = dic[key].cuda()
elif isinstance(dic[key], list):
if isinstance(dic[key][0], torch.Tensor):
for i in range(len(dic[key])):
dic[key][i] = dic[key][i].cuda()
elif isinstance(dic[key][0], list):
for i in range(len(dic[key])):
for j in range(len(dic[key][i])):
if isinstance(dic[key][i][j], torch.Tensor):
dic[key][i][j] = dic[key][i][j].detach().cuda()
# Path: m2t2/train_utils.py
def to_cpu(dic):
for key in dic:
if isinstance(dic[key], torch.Tensor):
dic[key] = dic[key].detach().cpu()
elif isinstance(dic[key], list):
if isinstance(dic[key][0], torch.Tensor):
for i in range(len(dic[key])):
dic[key][i] = dic[key][i].detach().cpu()
elif isinstance(dic[key][0], list):
for i in range(len(dic[key])):
for j in range(len(dic[key][i])):
if isinstance(dic[key][i][j], torch.Tensor):
dic[key][i][j] = dic[key][i][j].detach().cpu()
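# Illustrative usage sketch (not from the repository): to_gpu/to_cpu mutate the
# dictionary in place and only touch tensors or (nested) lists of tensors, so
# other values pass through untouched. Requires a CUDA device for the GPU hop.
import torch
from m2t2.train_utils import to_gpu, to_cpu

batch = {
    'points': torch.rand(2, 1024, 3),
    'masks': [torch.zeros(1024, dtype=torch.bool) for _ in range(2)],
    'task': 'pick',  # non-tensor values are left as-is
}
if torch.cuda.is_available():
    to_gpu(batch)   # tensors and listed tensors now live on the GPU
    to_cpu(batch)   # detached copies moved back to the CPU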
# Path: m2t2/m2t2_agent.py
from torchvision import transforms
from typing import List
from m2t2.dataset_utils import normalize_rgb
from m2t2.m2t2 import M2T2
from m2t2.pointnet2_utils import furthest_point_sample
from m2t2.meshcat_utils import (
create_visualizer, visualize_pointcloud, visualize_grasp
)
from m2t2.rlbench_utils import (
pcd_rgb_within_bound, gripper_pose_from_rlbench,
gripper_pose_to_rlbench, rotation_to_rlbench
)
from m2t2.train_utils import to_gpu, to_cpu
from yarr.agents.agent import Agent, ActResult, Summary
import numpy as np
import pickle
import torch
import trimesh.transformations as tra
# pt_idx = torch.randperm(pcd.shape[0])[:self.cfg.eval.num_points]
pt_idx = furthest_point_sample(
pcd.unsqueeze(0).cuda(), self.cfg.eval.num_points
).cpu().long()[0]
pcd, rgb = pcd[pt_idx], rgb[pt_idx]
data = {
'inputs': torch.cat([pcd - pcd.mean(axis=0), rgb], dim=1),
'points': pcd.unsqueeze(1).float(),
'lang_tokens': torch.from_numpy(
self.lang_emb[obs['lang_goal']]
).float()
}
self.place = False
if 'take' in obs['lang_goal'] or 'put' in obs['lang_goal']:
if obs['gripper_open'] == 0:
obj_in_hand_id = 92 if 'steak' in obs['lang_goal'] else 83
if (mask == obj_in_hand_id).sum() > 0:
self.place = True
if self.place:
obj_pcd = pcd_raw[mask == obj_in_hand_id]
obj_rgb = rgb_raw[mask == obj_in_hand_id]
# visualize_pointcloud(self.vis, 'object', obj_pcd, obj_rgb, size=0.02)
obj_pcd = torch.from_numpy(obj_pcd).float()
obj_rgb = self.normalize_rgb(
torch.from_numpy(obj_rgb / 255).float().T.unsqueeze(-1)
).squeeze(-1).T
pt_idx = furthest_point_sample(
obj_pcd.unsqueeze(0).cuda(), self.cfg.eval.num_obj_points
).cpu().long()[0]
obj_pcd, obj_rgb = obj_pcd[pt_idx], obj_rgb[pt_idx]
data['ee_pose'] = torch.from_numpy(
gripper_pose_from_rlbench(obs['gripper_matrix'][0])
).float()
# make_frame(self.vis, 'end_effector', T=data['ee_pose'].double().numpy())
inv_ee_pose = data['ee_pose'].inverse()
obj_pcd_ee = obj_pcd @ inv_ee_pose[:3, :3].T + inv_ee_pose[:3, 3]
obj_pcd_ee = obj_pcd_ee - obj_pcd_ee.mean(axis=0)
data['object_points'] = obj_pcd
data['object_inputs'] = torch.cat([obj_pcd_ee, obj_rgb], dim=1)
data['task_is_pick'] = torch.tensor(False)
data['task_is_place'] = torch.tensor(True)
else:
data['object_points'] = torch.rand(100, 3)
data['object_inputs'] = torch.rand(100, 6)
data['ee_pose'] = torch.eye(4)
data['task_is_pick'] = torch.tensor(True)
data['task_is_place'] = torch.tensor(False)
to_gpu(data)
for key in data:
data[key] = data[key].unsqueeze(0)
# print(key, data[key].shape)
with torch.no_grad():
outputs = self.model.infer(data, self.cfg)
to_cpu(outputs)
# for key in outputs:
# print(key, outputs[key][0].shape)
self.params = outputs['params'][0].numpy()
self.pose = gripper_pose_to_rlbench(outputs['actions'][0][0].numpy())
trans = self.pose[:3, 3] - self.params[0] * self.pose[:3, 2]
rot = rotation_to_rlbench(self.pose)
gripper_open = not self.place
self.before = False
# mask = outputs['contact_masks'][0]
# contacts = data['points'][0].cpu()[mask].numpy()
# confidence = outputs['confidence'][0][mask].numpy()
# colors = (confidence * np.array([[0, 255, 0]])).astype('uint8')
# visualize_pointcloud(self.vis, 'contacts', contacts, colors, size=0.02)
# action = outputs['actions'][0][0].numpy()
# visualize_grasp(
# self.vis, f'action/at', action, colors[0], linewidth=5
# )
# before = action.copy()
# before[:3, 3] -= self.params[0] * before[:3, 2]
# visualize_grasp(
# self.vis, f'action/before', before,
# np.roll(colors[0], -1), linewidth=2
# )
# after = action.copy()
# after[:3, 3] -= self.params[1] * after[:3, 2]
# after[:3, :3] = after[:3, :3] @ tra.euler_matrix(
# 0, 0, self.params[2]
# )[:3, :3]
# visualize_grasp(
# self.vis, f'action/after', after,
# np.roll(colors[0], -2), linewidth=2
# )
# retract = after.copy()
# retract[:3, 3] -= retract[:3, 2] * self.params[3]
# visualize_grasp(
# self.vis, f'action/retract', retract, colors[0], linewidth=2
# )
# input()
elif self.after:
# print(step, 'after')
trans = self.pose[:3, 3] - self.params[1] * self.pose[:3, 2]
rot = rotation_to_rlbench(
self.pose @ tra.euler_matrix(0, 0, self.params[2])
)
gripper_open = self.params[4] > 0
self.after = False
self.retract = True
elif self.retract:
# print(step, 'retract')
trans = self.pose[:3, 3] - (
self.params[1] + self.params[3]
) * self.pose[:3, 2]
rot = rotation_to_rlbench(self.pose)
gripper_open = self.params[4] > 0
self.retract = False
self.before = True
else:
# print(step, 'act')
trans = self.pose[:3, 3]
rot = rotation_to_rlbench(self.pose)
if self.place:
gripper_open = 1
trans = trans - 0.02 * self.pose[:3, 2]
            self.before = True
====REPOSITORY====
# Repo Name: Codra-Ingenierie-Informatique/DataLab
# Path: cdl/config.py
CONF_VERSION = "1.0.0"
APP_NAME = "DataLab"
MOD_NAME = "cdl"
APP_DESC = _("""DataLab is a generic signal and image processing platform""")
APP_PATH = osp.dirname(__file__)
DEBUG = os.environ.get("DEBUG", "").lower() in ("1", "true")
TEST_SEGFAULT_ERROR = len(os.environ.get("TEST_SEGFAULT_ERROR", "")) > 0
DATETIME_FORMAT = "%d/%m/%Y - %H:%M:%S"
DATAPATH = configtools.get_module_data_path(MOD_NAME, "data")
SHOTPATH = osp.join(
configtools.get_module_data_path(MOD_NAME), os.pardir, "doc", "images", "shots"
)
OTHER_PLUGINS_PATHLIST = [configtools.get_module_data_path(MOD_NAME, "plugins")]
IS_FROZEN = is_frozen(MOD_NAME)
PLOTPY_DEFAULTS = {
"plot": {
# "antialiasing": False,
# "title/font/size": 12,
# "title/font/bold": False,
# "marker/curve/text/font/size": 8,
# "marker/curve/text/font/family": "default",
# "marker/curve/text/font/bold": False,
# "marker/curve/text/font/italic": False,
"marker/curve/text/textcolor": "black",
# "marker/curve/text/background_color": "#ffffff",
# "marker/curve/text/background_alpha": 0.8,
# "marker/cross/text/font/family": "default",
# "marker/cross/text/font/size": 8,
# "marker/cross/text/font/bold": False,
# "marker/cross/text/font/italic": False,
"marker/cross/text/textcolor": "black",
# "marker/cross/text/background_color": "#ffffff",
"marker/cross/text/background_alpha": 0.7,
# "marker/cross/line/style": "DashLine",
# "marker/cross/line/color": "yellow",
# "marker/cross/line/width": 1,
# "marker/cursor/text/font/size": 8,
# "marker/cursor/text/font/family": "default",
# "marker/cursor/text/font/bold": False,
# "marker/cursor/text/font/italic": False,
# "marker/cursor/text/textcolor": "#ff9393",
# "marker/cursor/text/background_color": "#ffffff",
# "marker/cursor/text/background_alpha": 0.8,
"shape/drag/symbol/marker": "NoSymbol",
"shape/mask/symbol/size": 5,
"shape/mask/sel_symbol/size": 8,
# -----------------------------------------------------------------------------
# Annotated shape style for annotations:
"shape/annotation/line/style": "SolidLine",
"shape/annotation/line/color": "#ffff00",
"shape/annotation/line/width": 1,
"shape/annotation/fill/style": "SolidPattern",
"shape/annotation/fill/color": MAIN_BG_COLOR,
"shape/annotation/fill/alpha": 0.1,
"shape/annotation/symbol/marker": "Rect",
"shape/annotation/symbol/size": 3,
"shape/annotation/symbol/edgecolor": "#ffff00",
"shape/annotation/symbol/facecolor": "#ffff00",
"shape/annotation/symbol/alpha": 1.0,
"shape/annotation/sel_line/style": "SolidLine",
"shape/annotation/sel_line/color": "#00ff00",
"shape/annotation/sel_line/width": 1,
"shape/annotation/sel_fill/style": "SolidPattern",
"shape/annotation/sel_fill/color": MAIN_BG_COLOR,
"shape/annotation/sel_fill/alpha": 0.1,
"shape/annotation/sel_symbol/marker": "Rect",
"shape/annotation/sel_symbol/size": 9,
"shape/annotation/sel_symbol/edgecolor": "#00aa00",
"shape/annotation/sel_symbol/facecolor": "#00ff00",
"shape/annotation/sel_symbol/alpha": 0.7,
# -----------------------------------------------------------------------------
# Annotated shape style for result shapes / signals:
"shape/result/s/line/style": "SolidLine",
"shape/result/s/line/color": MAIN_FG_COLOR,
"shape/result/s/line/width": 1,
"shape/result/s/fill/style": "SolidPattern",
"shape/result/s/fill/color": MAIN_BG_COLOR,
"shape/result/s/fill/alpha": 0.1,
"shape/result/s/symbol/marker": "XCross",
"shape/result/s/symbol/size": 7,
"shape/result/s/symbol/edgecolor": MAIN_FG_COLOR,
"shape/result/s/symbol/facecolor": MAIN_FG_COLOR,
"shape/result/s/symbol/alpha": 1.0,
"shape/result/s/sel_line/style": "SolidLine",
"shape/result/s/sel_line/color": "#00ff00",
"shape/result/s/sel_line/width": 1,
"shape/result/s/sel_fill/style": "SolidPattern",
"shape/result/s/sel_fill/color": MAIN_BG_COLOR,
"shape/result/s/sel_fill/alpha": 0.1,
"shape/result/s/sel_symbol/marker": "Rect",
"shape/result/s/sel_symbol/size": 9,
"shape/result/s/sel_symbol/edgecolor": "#00aa00",
"shape/result/s/sel_symbol/facecolor": "#00ff00",
"shape/result/s/sel_symbol/alpha": 0.7,
# -----------------------------------------------------------------------------
# Annotated shape style for result shapes / images:
"shape/result/i/line/style": "SolidLine",
"shape/result/i/line/color": "#ffff00",
"shape/result/i/line/width": 1,
"shape/result/i/fill/style": "SolidPattern",
"shape/result/i/fill/color": MAIN_BG_COLOR,
"shape/result/i/fill/alpha": 0.1,
"shape/result/i/symbol/marker": "Rect",
"shape/result/i/symbol/size": 3,
"shape/result/i/symbol/edgecolor": "#ffff00",
"shape/result/i/symbol/facecolor": "#ffff00",
"shape/result/i/symbol/alpha": 1.0,
"shape/result/i/sel_line/style": "SolidLine",
"shape/result/i/sel_line/color": "#00ff00",
"shape/result/i/sel_line/width": 1,
"shape/result/i/sel_fill/style": "SolidPattern",
"shape/result/i/sel_fill/color": MAIN_BG_COLOR,
"shape/result/i/sel_fill/alpha": 0.1,
"shape/result/i/sel_symbol/marker": "Rect",
"shape/result/i/sel_symbol/size": 9,
"shape/result/i/sel_symbol/edgecolor": "#00aa00",
"shape/result/i/sel_symbol/facecolor": "#00ff00",
"shape/result/i/sel_symbol/alpha": 0.7,
# -----------------------------------------------------------------------------
},
}
def is_frozen(module_name: str) -> bool:
def get_mod_source_dir() -> str | None:
def get_def_dict(cls, category: str) -> dict:
def set_def_dict(cls, category: str, def_dict: dict) -> None:
def get_old_log_fname(fname):
def initialize():
def reset():
class MainSection(conf.Section, metaclass=conf.SectionMeta):
class ConsoleSection(conf.Section, metaclass=conf.SectionMeta):
class IOSection(conf.Section, metaclass=conf.SectionMeta):
class ProcSection(conf.Section, metaclass=conf.SectionMeta):
class ViewSection(conf.Section, metaclass=conf.SectionMeta):
class Conf(conf.Configuration, metaclass=conf.ConfMeta):
# Path: cdl/core/io/h5/common.py
class H5Importer:
"""DataLab HDF5 importer class"""
def __init__(self, filename):
self.h5file = h5py.File(filename)
self.__nodes = {}
self.root = RootNode(self.h5file)
self.__nodes[self.root.id] = self.root.dset
self.root.collect_children(self.__nodes)
NODE_FACTORY.run_post_triggers(self)
@property
def nodes(self):
"""Return all nodes"""
return self.__nodes.values()
def get(self, node_id: str):
"""Return node associated to id"""
return self.__nodes[node_id]
def get_relative(self, node: BaseNode, relpath: str, ancestor: int = 0):
"""Return node using relative path to another node"""
path = "/" + (
"/".join(node.id.split("/")[:-ancestor]) + "/" + relpath.strip("/")
).strip("/")
return self.__nodes[path]
def close(self):
"""Close HDF5 file"""
self.__nodes = {}
self.h5file.close()
# Path: cdl/env.py
DEBUG = os.environ.get("DEBUG", "").lower() in ("1", "true")
QUIET = "quiet"
NORMAL = "normal"
DEBUG = "debug"
UNATTENDED_ARG = "unattended"
VERBOSE_ARG = "verbose"
SCREENSHOT_ARG = "screenshot"
DELAY_ARG = "delay"
XMLRPCPORT_ARG = "xmlrpcport"
DONOTQUIT_ENV = "CDL_DO_NOT_QUIT"
UNATTENDED_ENV = GuiDataExecEnv.UNATTENDED_ENV
VERBOSE_ENV = GuiDataExecEnv.VERBOSE_ENV
SCREENSHOT_ENV = GuiDataExecEnv.SCREENSHOT_ENV
DELAY_ENV = GuiDataExecEnv.DELAY_ENV
XMLRPCPORT_ENV = "CDL_XMLRPCPORT"
CATCHER_TEST_ENV = "CDL_CATCHER_TEST"
class VerbosityLevels(enum.Enum):
class CDLExecEnv:
def __init__(self):
def to_dict(self):
def __str__(self):
def enable_demo_mode(self, delay: int):
def __get_mode(env):
def __set_mode(env, value):
def do_not_quit(self):
def do_not_quit(self, value):
def unattended(self):
def unattended(self, value):
def catcher_test(self):
def catcher_test(self, value):
def screenshot(self):
def screenshot(self, value):
def verbose(self):
def verbose(self, value):
def delay(self):
def delay(self, value: int):
def xmlrpcport(self):
def xmlrpcport(self, value: int):
def parse_args(self):
def set_env_from_args(self, args):
def log(self, source: Any, *objects: Any) -> None:
def print(self, *objects, sep=" ", end="\n", file=sys.stdout, flush=False):
def pprint(
self,
obj,
stream=None,
indent=1,
width=80,
depth=None,
compact=False,
sort_dicts=True,
):
# Path: cdl/obj.py
# Path: cdl/utils/qthelpers.py
def qt_handle_error_message(widget: QW.QWidget, message: str, context: str = None):
"""Handles application (QWidget) error message"""
traceback.print_exc()
txt = str(message)
msglines = txt.splitlines()
firstline = _("Error:") if context is None else f"%s: {context}" % _("Context")
msglines.insert(0, firstline)
if len(msglines) > 10:
msglines = msglines[:10] + ["..."]
title = widget.window().objectName()
QW.QMessageBox.critical(widget, title, os.linesep.join(msglines))
# Path: cdl/utils/strings.py
def to_string(obj: Any) -> str:
"""Convert to string, trying utf-8 then latin-1 codec"""
if isinstance(obj, bytes):
try:
return obj.decode()
except UnicodeDecodeError:
return obj.decode("latin-1")
try:
return str(obj)
except UnicodeDecodeError:
return str(obj, encoding="latin-1")
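# Illustrative sketch (not from the repository): the fallback order documented
# above is utf-8 first, then latin-1 only when utf-8 decoding fails; non-bytes
# inputs simply go through str().
from cdl.utils.strings import to_string

assert to_string(b"caf\xc3\xa9") == "café"   # valid utf-8
assert to_string(b"caf\xe9") == "café"       # not utf-8, decoded as latin-1
assert to_string(3.14) == "3.14"             # plain str() conversion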
# Path: cdl/widgets/h5browser.py
import abc
import os
import os.path as osp
from guidata.qthelpers import (
add_actions,
create_action,
create_toolbutton,
get_icon,
win32_fix_title_bar_background,
)
from plotpy.plot import PlotOptions, PlotWidget
from qtpy import QtCore as QC
from qtpy import QtGui as QG
from qtpy import QtWidgets as QW
from cdl.config import _
from cdl.core.io.h5 import H5Importer
from cdl.env import execenv
from cdl.obj import SignalObj
from cdl.utils.qthelpers import qt_handle_error_message
from cdl.utils.strings import to_string
self.scrollToItem(items[0])
def item_selection_changed(self) -> None:
"""Item selection has changed"""
is_selection = len(self.selectedItems()) > 0
self.expand_selection_action.setEnabled(is_selection)
self.collapse_selection_action.setEnabled(is_selection)
def get_top_level_items(self) -> list[QW.QTreeWidgetItem]:
"""Iterate over top level items"""
return [self.topLevelItem(_i) for _i in range(self.topLevelItemCount())]
def get_items(self) -> list[QW.QTreeWidgetItem]:
"""Return items (excluding top level items)"""
itemlist = []
def add_to_itemlist(item: QW.QTreeWidgetItem):
for index in range(item.childCount()):
citem = item.child(index)
itemlist.append(citem)
add_to_itemlist(citem)
for tlitem in self.get_top_level_items():
add_to_itemlist(tlitem)
return itemlist
def find_all_items(self):
"""Find all items"""
return self.findItems("", QC.Qt.MatchContains | QC.Qt.MatchRecursive)
def contextMenuEvent(self, event: QG.QContextMenuEvent) -> None:
"""Override Qt method"""
self.update_menu()
self.menu.popup(event.globalPos())
class H5TreeWidget(BaseTreeWidget):
"""HDF5 Browser Tree Widget"""
SIG_SELECTED = QC.Signal(QW.QTreeWidgetItem)
def __init__(self, parent):
super().__init__(parent)
title = _("HDF5 Browser")
self.setColumnCount(4)
self.setWindowTitle(title)
self.setHeaderLabels([_("Name"), _("Size"), _("Type"), _("Textual preview")])
self.fname = None
self.h5importer = None
def setup(self, fname):
"""Setup H5TreeWidget"""
self.fname = osp.abspath(fname)
self.h5importer = H5Importer(self.fname)
self.clear()
self.populate_tree()
self.expandAll()
for col in range(3):
self.resizeColumnToContents(col)
def cleanup(self):
"""Clean up widget"""
self.h5importer.close()
self.h5importer = None
def get_node(self, item):
"""Get HDF5 dataset associated to item"""
node_id = item.data(0, QC.Qt.UserRole)
if node_id:
return self.h5importer.get(node_id)
return None
def get_nodes(self, only_checked_items=True):
"""Get all nodes associated to checked items"""
datasets = []
for item in self.find_all_items():
if item.flags() & QC.Qt.ItemIsUserCheckable:
if only_checked_items and item.checkState(0) == 0:
continue
if item is not self.topLevelItem(0):
node_id = item.data(0, QC.Qt.UserRole)
datasets.append(self.h5importer.get(node_id))
return datasets
def activated(self, item):
"""Double-click event"""
if item is not self.topLevelItem(0):
self.SIG_SELECTED.emit(item)
def clicked(self, item):
"""Click event"""
self.activated(item)
def get_actions_from_items(self, items): # pylint: disable=W0613
"""Get actions from item"""
return []
def is_empty(self):
"""Return True if tree is empty"""
return len(self.find_all_items()) == 1
def is_any_item_checked(self):
"""Return True if any item is checked"""
for item in self.find_all_items():
if item.checkState(0) > 0:
return True
return False
def select_all(self, state):
"""Select all items"""
for item in self.findItems("", QC.Qt.MatchContains | QC.Qt.MatchRecursive):
if item.flags() & QC.Qt.ItemIsUserCheckable:
item.setSelected(state)
if state:
self.clicked(item)
def toggle_all(self, state):
"""Toggle all item state from 'unchecked' to 'checked'
(or vice-versa)"""
for item in self.findItems("", QC.Qt.MatchContains | QC.Qt.MatchRecursive):
            if item.flags() & QC.Qt.ItemIsUserCheckable:
====REPOSITORY====
# Repo Name: humemarx/CPG-LCF
# Path: models/networks/backbone.py
class ConvModule(nn.Module):
"""A conv block that bundles conv/norm/activation layers.
This block simplifies the usage of convolution layers, which are commonly
used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
It is based upon three build methods: `build_conv_layer()`,
`build_norm_layer()` and `build_activation_layer()`.
Besides, we add some additional features in this module.
1. Automatically set `bias` of the conv layer.
2. Spectral norm is supported.
3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only
supports zero and circular padding, and we add "reflect" padding mode.
Args:
in_channels (int): Number of channels in the input feature map.
Same as that in ``nn._ConvNd``.
out_channels (int): Number of channels produced by the convolution.
Same as that in ``nn._ConvNd``.
kernel_size (int | tuple[int]): Size of the convolving kernel.
Same as that in ``nn._ConvNd``.
stride (int | tuple[int]): Stride of the convolution.
Same as that in ``nn._ConvNd``.
padding (int | tuple[int]): Zero-padding added to both sides of
the input. Same as that in ``nn._ConvNd``.
dilation (int | tuple[int]): Spacing between kernel elements.
Same as that in ``nn._ConvNd``.
groups (int): Number of blocked connections from input channels to
output channels. Same as that in ``nn._ConvNd``.
bias (bool | str): If specified as `auto`, it will be decided by the
norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise
False. Default: "auto".
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer. Default: None.
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
inplace (bool): Whether to use inplace mode for activation.
Default: True.
with_spectral_norm (bool): Whether use spectral norm in conv module.
Default: False.
padding_mode (str): If the `padding_mode` has not been supported by
current `Conv2d` in PyTorch, we will use our own padding layer
instead. Currently, we support ['zeros', 'circular'] with official
implementation and ['reflect'] with our own implementation.
Default: 'zeros'.
order (tuple[str]): The order of conv/norm/activation layers. It is a
sequence of "conv", "norm" and "act". Common examples are
("conv", "norm", "act") and ("act", "conv", "norm").
Default: ('conv', 'norm', 'act').
"""
_abbr_ = 'conv_block'
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
bias: bool = False,
conv_type=nn.Conv2d,
norm_type=None,
act_type=nn.ReLU,
inplace=True,
order: tuple = ('conv', 'norm', 'act')):
super().__init__()
self.order = order
self.conv_type = conv_type
self.act_type = act_type
self.norm_type = norm_type
self.with_norm = norm_type is not None
self.with_activation = act_type is not None
# build convolution layer
self.conv = conv_type(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
# build normalization layers
if self.with_norm:
norm = norm_type(out_channels) # type: ignore
self.norm_name = get_norm_name(norm_type)
self.add_module(self.norm_name, norm)
else:
self.norm_name = None # type: ignore
if self.with_activation:
self.activate = act_type(inplace=inplace)
self.init_weights()
@property
def norm(self):
if self.norm_name:
return getattr(self, self.norm_name)
else:
return None
def init_weights(self):
# 1. It is mainly for customized conv layers with their own
# initialization manners by calling their own ``init_weights()``,
# and we do not want ConvModule to override the initialization.
# 2. For customized conv layers without their own initialization
# manners (that is, they don't have their own ``init_weights()``)
# and PyTorch's conv layers, they will be initialized by
# this method with default ``kaiming_init``.
# Note: For PyTorch's conv layers, they will be overwritten by our
# initialization implementation using default ``kaiming_init``.
if not hasattr(self.conv, 'init_weights'):
if self.with_activation and isinstance(self.act_type, nn.LeakyReLU):
nonlinearity = 'leaky_relu'
a = 0.01
else:
nonlinearity = 'relu'
a = 0
kaiming_init(self.conv, a=a, nonlinearity=nonlinearity)
if self.with_norm:
constant_init(self.bn, 1, bias=0)
def forward(self,x):
for layer in self.order:
if layer == 'conv':
x = self.conv(x)
elif layer == 'norm' and self.with_norm:
x = self.norm(x)
elif layer == 'act' and self.with_activation:
x = self.activate(x)
return x
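# Illustrative usage sketch (not from the repository): a plain conv + ReLU block
# built with the default ('conv', 'norm', 'act') order. norm_type is left at None
# here; passing e.g. norm_type=nn.BatchNorm2d would enable the norm branch
# described in the docstring. Shapes and channel counts are arbitrary.
import torch
from models.networks.backbone import ConvModule

block = ConvModule(in_channels=3, out_channels=16, kernel_size=3, padding=1)
out = block(torch.rand(2, 3, 32, 32))   # -> torch.Size([2, 16, 32, 32])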
# Path: models/backbone2d/resnet.py
class BasicBlock(nn.Module):
"""Basic block for ResNet."""
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_type=nn.Conv2d,
norm_type=nn.BatchNorm2d,
dcn=None,
plugins=None,
zero_init_residual=True):
super().__init__()
assert dcn is None, 'Not implemented yet.'
assert plugins is None, 'Not implemented yet.'
self.zero_init_residual = zero_init_residual
norm1 = norm_type(planes)
self.norm1_name = get_norm_name(norm_type, postfix=1)
norm2 = norm_type(planes)
self.norm2_name = get_norm_name(norm_type, postfix=2)
self.conv1 = conv_type(
inplanes,
planes,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = conv_type(
planes,
planes,
3,
padding=1,
bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm2_name)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.GroupNorm) or isinstance(m, _BatchNorm):
constant_init(m, val=1.0, bias=0.)
if self.zero_init_residual and getattr(m, 'norm2'):
constant_init(m, val=0.0, bias=0.)
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
# Path: models/backbone2d/resnet.py
class Bottleneck(nn.Module):
"""Bottleneck block for ResNet.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is
"caffe", the stride-two layer is the first 1x1 conv layer.
"""
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_type=nn.Conv2d,
norm_type=nn.BatchNorm2d,
dcn=None,
plugins=None,
zero_init_residual=True):
super().__init__()
assert style in ['pytorch', 'caffe']
assert dcn is None or issubclass(dcn, _ConvNd)
self.zero_init_residual = zero_init_residual
self.inplanes = inplanes
self.planes = planes
self.stride = stride
self.dilation = dilation
self.style = style
self.with_cp = with_cp
self.conv_type = conv_type
self.norm_type = norm_type
self.dcn = dcn
self.with_dcn = dcn is not None
if self.style == 'pytorch':
self.conv1_stride = 1
self.conv2_stride = stride
else:
self.conv1_stride = stride
self.conv2_stride = 1
norm1 = norm_type(planes)
self.norm1_name = get_norm_name(norm_type, postfix=1)
norm2 = norm_type(planes)
self.norm2_name = get_norm_name(norm_type, postfix=2)
norm3 = norm_type(planes*self.expansion)
self.norm3_name = get_norm_name(norm_type, postfix=3)
self.conv1 = conv_type(
inplanes,
planes,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
fallback_on_stride = False
if self.with_dcn:
fallback_on_stride = dcn.pop('fallback_on_stride', False)
if not self.with_dcn or fallback_on_stride:
self.conv2 = conv_type(
planes,
planes,
kernel_size=3,
stride=self.conv2_stride,
padding=dilation,
dilation=dilation,
bias=False)
else:
assert self.conv_type is None, 'conv_cfg must be None for DCN'
self.conv2 = dcn(
planes,
planes,
kernel_size=3,
stride=self.conv2_stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = conv_type(
planes,
planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm2_name)
@property
def norm3(self):
"""nn.Module: normalization layer after the third convolution layer"""
return getattr(self, self.norm3_name)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.GroupNorm) or isinstance(m, _BatchNorm):
constant_init(m, val=1.0, bias=0.)
if self.zero_init_residual and getattr(m, 'norm3'):
constant_init(m, val=0.0, bias=0.)
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
# Path: models/backbone2d/resnet.py
def get_norm_name(norm_type, postfix=1):
if issubclass(norm_type, _InstanceNorm): # IN is a subclass of BN
return 'in{}'.format(postfix)
elif issubclass(norm_type, _BatchNorm):
return 'bn{}'.format(postfix)
elif issubclass(norm_type, nn.GroupNorm):
return 'gn{}'.format(postfix)
elif issubclass(norm_type, nn.LayerNorm):
return 'ln{}'.format(postfix)
# Path: utils/config_parser.py
def get_module(config=None, *args, **kwargs):
import models
import datasets
if config != None:
if type(config) != dict:
config = class2dic(config)
for key in config:
kwargs[key] = config[key]
assert 'type' in kwargs
method_code = eval(kwargs['type'])
args_count = method_code.__init__.__code__.co_argcount
input_params = method_code.__init__.__code__.co_varnames[1:args_count]
new_kwargs = {}
for i, value in enumerate(args):
new_kwargs[input_params[i]] = value
for key in kwargs:
if key in input_params:
new_kwargs[key] = kwargs[key]
result_module = method_code(**new_kwargs)
return result_module
# Path: models/utils/wrappers.py
class Upsample(nn.Module):
def __init__(self,
size=None,
scale_factor=None,
mode='nearest',
align_corners=None):
super().__init__()
self.size = size
if isinstance(scale_factor, tuple):
self.scale_factor = tuple(float(factor) for factor in scale_factor)
else:
self.scale_factor = float(scale_factor) if scale_factor else None
self.mode = mode
self.align_corners = align_corners
def forward(self, x):
if not self.size:
size = [int(t * self.scale_factor) for t in x.shape[-2:]]
else:
size = self.size
return resize(x, size, None, self.mode, self.align_corners)
# Path: models/utils/wrappers.py
def resize(input,
size=None,
scale_factor=None,
mode='nearest',
align_corners=None,
warning=True):
if warning:
if size is not None and align_corners:
input_h, input_w = tuple(int(x) for x in input.shape[2:])
output_h, output_w = tuple(int(x) for x in size)
if output_h > input_h or output_w > output_h:
if ((output_h > 1 and output_w > 1 and input_h > 1
and input_w > 1) and (output_h - 1) % (input_h - 1)
and (output_w - 1) % (input_w - 1)):
warnings.warn(
f'When align_corners={align_corners}, '
'the output would more aligned if '
f'input size {(input_h, input_w)} is `x+1` and '
f'out size {(output_h, output_w)} is `nx+1`')
return F.interpolate(input, size, scale_factor, mode, align_corners)
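# Illustrative usage sketch (not from the repository): both resize() and the
# Upsample wrapper defer to F.interpolate, so the two calls below produce
# tensors of identical shape.
import torch
from models.utils import resize, Upsample

feat = torch.rand(1, 8, 16, 16)
up_a = resize(feat, size=(32, 32), mode='bilinear', align_corners=False)
up_b = Upsample(scale_factor=2, mode='bilinear', align_corners=False)(feat)
assert up_a.shape == up_b.shape == (1, 8, 32, 32)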
# Path: models/backbone2d/hrnet.py
import warnings
import torch.nn as nn
import torch
from models.networks.backbone import ConvModule
from models.backbone2d.resnet import BasicBlock, Bottleneck, get_norm_name
from utils.config_parser import get_module
from models.utils import resize, Upsample
from torch.nn.modules.batchnorm import _BatchNorm
from collections import OrderedDict
self.norm_type(planes * block.expansion))
layers = []
layers.append(
block(
inplanes,
planes,
stride,
downsample=downsample,
with_cp=self.with_cp,
norm_type=self.norm_type,
conv_type=self.conv_type))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
with_cp=self.with_cp,
norm_type=self.norm_type,
conv_type=self.conv_type))
return nn.Sequential(*layers)
def _make_stage(self, layer_config, in_channels, multiscale_output=True):
"""Make each stage."""
num_modules = layer_config['num_modules']
num_branches = layer_config['num_branches']
num_blocks = layer_config['num_blocks']
num_channels = layer_config['num_channels']
block = self.blocks_dict[layer_config['block']]
hr_modules = []
for i in range(num_modules):
# multi_scale_output is only used for the last module
if not multiscale_output and i == num_modules - 1:
reset_multiscale_output = False
else:
reset_multiscale_output = True
hr_modules.append(
HRModule(
num_branches,
block,
num_blocks,
in_channels,
num_channels,
reset_multiscale_output,
with_cp=self.with_cp,
norm_type=self.norm_type,
conv_type=self.conv_type))
return nn.Sequential(*hr_modules), in_channels
def _freeze_stages(self):
"""Freeze stages param and norm stats."""
if self.frozen_stages >= 0:
self.norm1.eval()
self.norm2.eval()
for m in [self.conv1, self.norm1, self.conv2, self.norm2]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
if i == 1:
m = getattr(self, f'layer{i}')
t = getattr(self, f'transition{i}')
elif i == 4:
m = getattr(self, f'stage{i}')
else:
m = getattr(self, f'stage{i}')
t = getattr(self, f'transition{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
t.eval()
for param in t.parameters():
param.requires_grad = False
def forward(self, x):
"""Forward function."""
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.norm2(x)
x = self.relu(x)
x = self.layer1(x)
x_list = []
for i in range(self.stage2_cfg['num_branches']):
if self.transition1[i] is not None:
x_list.append(self.transition1[i](x))
else:
x_list.append(x)
y_list = self.stage2(x_list)
x_list = []
for i in range(self.stage3_cfg['num_branches']):
if self.transition2[i] is not None:
x_list.append(self.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage3(x_list)
x_list = []
for i in range(self.stage4_cfg['num_branches']):
if self.transition3[i] is not None:
x_list.append(self.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage4(x_list)
return y_list
def train(self, mode=True):
"""Convert the model into training mode will keeping the normalization
layer freezed."""
        super().train(mode)
====REPOSITORY====
# Repo Name: lalalamdbf/PLSE_IDRR
# Path: src/prompt-tuning/prompt/data_utils.py
class InputFeatures(dict):
"""
The class for input to the PLM and Prompts. To make users explicitly know the available keys,
we define a dict with a set of predefined possible keys. The default value to any key is None.
When use it as a dict, all the keys whose values are None are invisible.
This class support most of the dict's operation (See Examples). It can also be consumed by
pytorch's default_collate in DataLoader.
Also a :py:meth:`to_tensor()` method is build to convert the values into torch.Tensor for torch's input.
Examples:
.. code-block:: python
in_feat = InputFeatures(**{'input_ids':[1,4,5], 'soft_token_ids': [3,4,5]}) # init from dict
print(in_feat.keys()) # ['input_ids, 'soft_token_ids']
in_feat['label'] = 3 # can assign value like normal dict
print(in_feat.keys()) # ['input_ids','label', 'soft_token_ids'] (Note that it's also ordered)
print(in_feat['label']) # 3
in_feat['alice'] = 0 # KeyError: Key alice not in predefined set of keys
in_feat.values() # [[1,4,5], 3, [3,4,5]] (Note that it's also ordered)
[in_feat[key] for key in in_feat] # [[1,4,5], 3, [3,4,5]]
new_dict= {**in_feat, 'new_key':2} # new_dict is {'input_ids': [1, 4, 5], 'label': 3, 'soft_token_ids': [3, 4, 5], 'new_key': 2}
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``: Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded)
tokens.
token_type_ids: (Optional) Segment token indices to indicate first and second
portions of the inputs. Only some models use them.
label: (Optional) Label corresponding to the input. Int for classification problems,
float for regression problems.
"""
tensorable_keys = ['input_ids', 'inputs_embeds', 'attention_mask', 'token_type_ids', 'label',
'decoder_input_ids', 'decoder_inputs_embeds', 'soft_token_ids',
'past_key_values', 'loss_ids','conns_index']
all_keys = ['input_ids', 'inputs_embeds', 'attention_mask', 'token_type_ids', 'label',
'decoder_input_ids', 'decoder_inputs_embeds', 'soft_token_ids',
'past_key_values', 'loss_ids','guid', 'tgt_text', 'encoded_tgt_text', 'input_ids_len','conns_index']
non_tensorable_keys = []
def __init__(self,
input_ids: Optional[Union[List, torch.Tensor]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
attention_mask: Optional[Union[List[int], torch.Tensor]] = None,
token_type_ids: Optional[Union[List[int], torch.Tensor]] = None,
label: Optional[Union[int, torch.Tensor]] = None,
decoder_input_ids: Optional[Union[List, torch.Tensor]] = None,
decoder_inputs_embeds: Optional[torch.Tensor] = None,
soft_token_ids: Optional[Union[List, torch.Tensor]] = None,
past_key_values: Optional[torch.Tensor] = None, # for prefix_tuning
loss_ids: Optional[Union[List, torch.Tensor]] = None,
guid: Optional[str] = None,
tgt_text: Optional[str] = None,
use_cache: Optional[bool] = None,
encoded_tgt_text: Optional[str] = None,
input_ids_len: Optional[int] = None,
conns_index = None,
**kwargs):
self.input_ids = input_ids
self.inputs_embeds = inputs_embeds
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.label = label
self.decoder_input_ids = decoder_input_ids
self.decoder_inputs_embeds = decoder_inputs_embeds
self.soft_token_ids = soft_token_ids
self.past_key_values = past_key_values
self.loss_ids = loss_ids
self.guid = guid
self.tgt_text = tgt_text
self.encoded_tgt_text = encoded_tgt_text
self.use_cache = use_cache
self.input_ids_len = input_ids_len
self.conns_index = conns_index
for k in kwargs.keys():
setattr(self, k, kwargs[k])
@classmethod
def add_tensorable_keys(cls, *args):
cls.tensorable_keys.extend(args)
@classmethod
def add_not_tensorable_keys(cls, *args):
cls.not_tensorable_keys.extend(args)
@classmethod
def add_keys(cls, *args):
cls.all_keys.extend(args)
def __repr__(self):
return str(self.to_json_string())
def __len__(self):
return len(self.keys())
def to_tensor(self, device: str = 'cuda'):
"""inplace operation, convert all tensorable features into :obj:`torch.tensor`"""
for key in self.tensorable_keys:
value = getattr(self, key)
if value is not None:
setattr(self, key, torch.tensor(value))
return self
def to(self, device: str = "cuda:0"):
r"""move the tensor keys to runtime device, such as gpu:0
"""
for key in self.tensorable_keys:
value = getattr(self, key)
if value is not None:
setattr(self, key, value.to(device))
return self
def cuda(self, device: str = "cuda:0"):
r"""mimic the tensor behavior
"""
return self.to(device)
def to_json_string(self, keep_none=False):
"""Serializes this instance to a JSON string."""
data = {}
for key in self.all_keys:
value = getattr(self, key)
if isinstance(value, torch.Tensor):
data[key] = value.detach().cpu().tolist()
elif value is None and keep_none:
data[key] = None
else:
data[key] = value
return json.dumps(data) + "\n"
def keys(self, keep_none=False) -> List[str]:
"""get all keys of the InputFeatures
Args:
keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. Defaults to False.
Returns:
:obj:`List[str]`: keys of the InputFeatures
"""
if keep_none:
return self.all_keys
else:
return [key for key in self.all_keys if getattr(self, key) is not None]
def to_dict(self, keep_none=False) -> Dict[str, Any]:
"""get the dict of mapping from keys to values of the InputFeatures
Args:
keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. Defaults to False.
Returns:
:obj:`Dict[str, Any]`: dict of mapping from keys to values of the InputFeatures
"""
data = {}
for key in self.all_keys:
value = getattr(self, key)
if value is not None:
data[key] = value
elif value is None and keep_none:
data[key] = None
return data
def __getitem__(self, key):
return getattr(self, key)
def __iter__(self):
return iter(self.keys())
def __setitem__(self, key, item):
if key not in self.all_keys:
raise KeyError("Key {} not in predefined set of keys".format(key))
setattr(self, key, item)
def values(self, keep_none=False) -> List[Any]:
"""get the values with respect to the keys of the InputFeatures
Args:
keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. Defaults to False.
Returns:
:obj:`List[Any]`: the values with respect to the keys of the InputFeatures
"""
return [getattr(self, key) for key in self.keys(keep_none=keep_none)]
def __contains__(self, key, keep_none=False):
return key in self.keys(keep_none)
def items(self,):
"""get the (key, value) pairs of the InputFeatures
Args:
keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. Defaults to False.
Returns:
:obj:`List[Any]`: the (key, value) pairs of the InputFeatures
"""
return [(key, self.__getitem__(key)) for key in self.keys()]
@staticmethod
def collate_fct(batch: List):
r'''
This function is used to collate the input_features.
Args:
batch (:obj:`List[Union[Dict, InputFeatures]]`): A batch of the current data.
Returns:
            :obj:`InputFeatures`: Return the :py:class:`~openprompt.data_utils.data_utils.InputFeatures` of the current batch of data.
'''
elem = batch[0]
return_dict = {}
for key in elem:
if key == "encoded_tgt_text":
return_dict[key] = [d[key] for d in batch]
else:
try:
return_dict[key] = default_collate([d[key] for d in batch])
except:
print(f"key{key}\n d {[batch[i][key] for i in range(len(batch))]} ")
return InputFeatures(**return_dict)
# Path: src/prompt-tuning/prompt/data_utils.py
class InputExample(object):
"""A raw input example consisting of segments of text,
a label for classification task or a target sequence of generation task.
Other desired information can be passed via meta.
Args:
guid (:obj:`str`, optional): A unique identifier of the example.
text_a (:obj:`str`, optional): The placeholder for sequence of text.
text_b (:obj:`str`, optional): A secend sequence of text, which is not always necessary.
label (:obj:`int`, optional): The label id of the example in classification task.
        tgt_text (:obj:`Union[str,List[str]]`, optional): The target sequence of the example in a generation task.
meta (:obj:`Dict`, optional): An optional dictionary to store arbitrary extra information for the example.
"""
def __init__(self,
guid = None,
text_a = "",
text_b = "",
label = None,
meta: Optional[Dict] = None,
tgt_text: Optional[Union[str,List[str]]] = None
):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
self.meta = meta if meta else {}
self.tgt_text = tgt_text
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
r"""Serialize this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
r"""Serialize this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def keys(self, keep_none=False):
return [key for key in self.__dict__.keys() if getattr(self, key) is not None]
@staticmethod
def load_examples(path: str) -> List['InputExample']:
"""Load a set of input examples from a file"""
with open(path, 'rb') as fh:
return pickle.load(fh)
@staticmethod
def save_examples(examples: List['InputExample'], path: str) -> None:
"""Save a set of input examples to a file"""
with open(path, 'wb') as fh:
pickle.dump(examples, fh)
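# Illustrative sketch (not from the repository): round-trip an InputExample
# through the pickle helpers above. The import path, file name and label id are
# placeholders; inside the repository the class is imported relatively
# (from .data_utils import InputExample).
from prompt.data_utils import InputExample  # assumes src/prompt-tuning is on sys.path

examples = [InputExample(guid='0',
                         text_a='The road was wet.',
                         text_b='The game was delayed.',
                         label=2)]
InputExample.save_examples(examples, '/tmp/idrr_examples.pkl')
restored = InputExample.load_examples('/tmp/idrr_examples.pkl')
assert restored[0].text_b == 'The game was delayed.'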
# Path: src/prompt-tuning/prompt/prompt_base.py
from abc import abstractmethod
from transformers.file_utils import ModelOutput
from transformers.utils.dummy_pt_objects import PreTrainedModel
from .data_utils import InputFeatures, InputExample
from typing import *
from transformers.tokenization_utils import PreTrainedTokenizer
import json
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import traceback
class Template(nn.Module):
r'''
Base class for all the templates.
Most of methods are abstract, with some exceptions to hold the common methods for all template, such as ``loss_ids``, ``save``, ``load``.
Args:
tokenizer (:obj:`PreTrainedTokenizer`): A tokenizer to appoint the vocabulary and the tokenization strategy.
placeholder_mapping (:obj:`dict`): A place holder to represent the original input text.
'''
registered_inputflag_names = ["loss_ids", "shortenable_ids"]
def __init__(self,
tokenizer: PreTrainedTokenizer,
placeholder_mapping: dict = {'<text_a>':'text_a','<text_b>':'text_b'},
):
super().__init__()
self.tokenizer = tokenizer
self.placeholder_mapping = placeholder_mapping
self._in_on_text_set = False
self.mixed_token_start = "{"
self.mixed_token_end = "}"
def get_default_loss_ids(self) -> List[int]:
'''Get the loss indices for the template using mask.
e.g. when self.text is ``'{"placeholder": "text_a"}. {"meta": "word"} is {"mask"}.'``,
output is ``[0, 0, 0, 0, 1, 0]``.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]:
- 1 for a masked tokens.
- 0 for a sequence tokens.
'''
return [1 if 'mask' in d else 0 for d in self.text]
def get_default_shortenable_ids(self) -> List[int]:
"""Every template needs shortenable_ids, denoting which part of the template can be truncate to fit
the language model's ``max_seq_length``. Default: the input text is shortenable, while the template text and other
special tokens are not shortenable.
e.g. when self.text is ``'{"placeholder": "text_a"} {"placeholder": "text_b", "shortenable": False} {"meta": "word"} is {"mask"}.'``,
output is ``[1, 0, 0, 0, 0, 0, 0]``.
Returns:
:obj:`List[int]`: A list of integers in the range ``[0, 1]``:
- 1 for the input tokens.
- 0 for the template sequence tokens.
"""
idx = []
for d in self.text:
if 'shortenable' in d:
idx.append(1 if d['shortenable'] else 0)
else:
idx.append(1 if 'placeholder' in d else 0)
return idx
def get_default_soft_token_ids(self) -> List[int]:
r'''
This function identifies which tokens are soft tokens.
Sometimes tokens in the template are not from the vocabulary,
but a sequence of soft tokens.
In this case, you need to implement this function
Raises:
            NotImplementedError: if needed, add ``soft_token_ids`` into ``registered_inputflag_names`` attribute of Template class and implement this method.
====REPOSITORY====
# Repo Name: HealthSciTech/E2E-PPG
# Path: utils.py
def get_data(
file_name: str,
local_directory: str = "data",
usecols: List[str] = ['ppg'],
) -> np.ndarray:
"""
Import data (e.g., PPG signals)
Args:
file_name (str): Name of the input file
local_directory (str): Data directory
usecols (List[str]): The columns to read from the input file
Return:
sig (np.ndarray): the input signal (e.g., PPG)
"""
try:
# Construct the file path
file_path = os.path.join(local_directory, file_name)
# Load data from the specified CSV file
input_data = pd.read_csv(
file_path,
delim_whitespace=True,
usecols=usecols)
# Extract signal
sig = input_data[usecols[0]].values
return sig
except FileNotFoundError:
print(f"File not found: {file_name}")
except pd.errors.EmptyDataError:
print(f"Empty data in file: {file_name}")
except Exception as e:
print(f"An unexpected error occurred: {e}")
# Return None in case of an error
return None
# Path: ppg_sqa.py
def sqa(
sig: np.ndarray,
sampling_rate: int,
filter_signal: bool = True,
) -> Tuple[list, list]:
"""
Perform PPG Signal Quality Assessment (SQA).
This function assesses the quality of a PPG signal by classifying its segments
    as reliable (clean) or unreliable (noisy) using a pre-trained model.
The clean indices represent parts of the PPG signal that are deemed reliable,
while the noisy indices indicate parts that may be affected by noise or artifacts.
Args:
sig (np.ndarray): PPG signal.
sampling_rate (int): Sampling rate of the PPG signal.
filter_signal (bool): True if the signal has not filtered using
a bandpass filter.
Return:
clean_indices: A list of clean indices.
noisy_indices: A list of noisy indices.
Reference:
Feli, M., Azimi, I., Anzanpour, A., Rahmani, A. M., & Liljeberg, P. (2023).
An energy-efficient semi-supervised approach for on-device photoplethysmogram signal quality assessment.
Smart Health, 28, 100390.
"""
# Load pre-trained model and normalization scaler
scaler = joblib.load(os.path.join(MODEL_PATH, SCALER_FILE_NAME))
model = pickle.load(
open(os.path.join(MODEL_PATH, SQA_MODEL_FILE_NAME), 'rb'))
resampling_flag = False
# Check if resampling is needed and perform resampling if necessary
if sampling_rate != SQA_MODEL_SAMPLING_FREQUENCY:
sig = resample_signal(
sig=sig, fs_origin=sampling_rate, fs_target=SQA_MODEL_SAMPLING_FREQUENCY)
resampling_flag = True
resampling_rate = sampling_rate/SQA_MODEL_SAMPLING_FREQUENCY
sampling_rate = SQA_MODEL_SAMPLING_FREQUENCY
# Apply bandpass filter if needed
if filter_signal:
sig = bandpass_filter(
sig=sig, fs=sampling_rate, lowcut=0.5, highcut=3)
# Generate indices for the PPG signal
sig_indices = np.arange(len(sig))
# Segment the PPG signal into segments using a shifting window
segments, segments_indices = segmentation(
sig=sig,
sig_indices=sig_indices,
sampling_rate=sampling_rate,
method='shifting',
segment_size=SEGMENT_SIZE,
shift_size=SHIFTING_SIZE,
)
# Initialize lists to store all reliable and unreliable segments
reliable_segments_all = []
unreliable_segments_all = []
reliable_indices_all = []
unreliable_indices_all = []
# Loop through the segments for feature extraction and classification
for idx, segment in enumerate(segments):
# Feature extraction
features = feature_extraction(segment, sampling_rate)
# Classification
if np.isnan(np.array(features)).any():
pred = 1
else:
features_norm = scaler.transform([features])
pred = model.predict(features_norm)
# Categorize segments based on classification result
if pred == 0:
reliable_segments_all.append(segment)
reliable_indices_all.append(segments_indices[idx])
else:
unreliable_segments_all.append(segment)
unreliable_indices_all.append(segments_indices[idx])
# Generate a flattened list of reliable indices as clean indices
clean_indices = list(set([item for segment in reliable_indices_all for item in segment]))
# The indices that don't exist in the flat list of clean indices indicate unreliable indices
unreliable_indices = [item for item in sig_indices if item not in clean_indices]
# Unflatten the unreliable_indices list to separate noisy parts
noisy_indices = []
for group in mit.consecutive_groups(unreliable_indices):
noisy_indices.append(list(group))
noisy_indices = [noisy_indices[i] for i in range(
len(noisy_indices)) if len(noisy_indices[i]) > SHIFTING_SIZE]
# If resampling performed, update indices according to the original sampling rate
if resampling_flag:
clean_indices = [int(index * resampling_rate) for index in clean_indices]
noisy_indices = [[int(index * resampling_rate) for index in noise] for noise in noisy_indices]
return clean_indices, noisy_indices
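# A minimal sketch of running the quality assessment, assuming get_data (utils.py) and sqa
# are importable and the repository's pre-trained SQA model and scaler files exist under
# MODEL_PATH; the file name and the 20 Hz sampling rate are illustrative values only.
if __name__ == "__main__":
    sig = get_data(file_name="sample_ppg.csv")
    if sig is not None:
        clean_indices, noisy_indices = sqa(sig=sig, sampling_rate=20, filter_signal=True)
        print(f"{len(clean_indices)} clean samples, {len(noisy_indices)} noisy runs")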
# Path: ppg_reconstruction.py
def reconstruction(
sig: np.ndarray,
clean_indices: list,
noisy_indices:list,
sampling_rate: int,
filter_signal: bool = True,
) -> Tuple[np.ndarray, list, list]:
'''
Reconstruct noisy PPG signals using GAN.
Args:
sig (np.ndarray): Original PPG signal.
clean_indices (list): List of indices representing clean parts.
noisy_indices (list): List of indices representing noisy parts.
sampling_rate (int): Sampling rate of the signal.
filter_signal (bool): True if the signal has not been filtered using
a bandpass filter.
Return:
ppg_signal (np.ndarray): Reconstructed PPG signal (if reconstruction is
applied; otherwise, returns the original signal).
clean_indices (list): Updated indices of clean parts (if reconstruction is
applied; otherwise, returns the original indices of clean parts).
noisy_indices (list): Updated indices of noisy parts (if reconstruction is
applied; otherwise, returns the original indices of noisy parts).
Reference:
Wang, Y., Azimi, I., Kazemi, K., Rahmani, A. M., & Liljeberg, P. (2022, July).
Ppg signal reconstruction using deep convolutional generative adversarial network.
In 2022 44th Annual International Conference of the IEEE Engineering in Medicine & Biology Society (EMBC) (pp. 3387-3391). IEEE.
'''
# Set the Generator class in the main module for compatibility with the saved GAN model
setattr(__main__, "Generator", Generator)
# Load GAN model parameters
generator = torch.load(os.path.join(
MODEL_PATH, GAN_MODEL_FILE_NAME), map_location=torch.device('cpu'))
device = torch.device(
"cuda:0" if torch.cuda.is_available() else "cpu")
resampling_flag = False
# Check if resampling is needed and perform resampling if necessary
if sampling_rate != RECONSTRUCTION_MODEL_SAMPLING_FREQUENCY:
sig = resample_signal(
sig=sig, fs_origin=sampling_rate, fs_target=RECONSTRUCTION_MODEL_SAMPLING_FREQUENCY)
resampling_flag = True
resampling_rate = sampling_rate/RECONSTRUCTION_MODEL_SAMPLING_FREQUENCY
sampling_rate_original = sampling_rate
sampling_rate = RECONSTRUCTION_MODEL_SAMPLING_FREQUENCY
# Apply bandpass filter if needed
if filter_signal:
sig = bandpass_filter(
sig=sig, fs=sampling_rate, lowcut=0.5, highcut=3)
# Scale the original PPG signal for further processing
sig_scaled = preprocessing.scale(sig)
# Maximum length for reconstruction
max_rec_length = int(MAX_RECONSTRUCTION_LENGTH_SEC*sampling_rate)
# Flag to indicate if reconstruction has occurred
reconstruction_flag = False
# Iterate over noisy parts for reconstruction
for noise in noisy_indices:
if len(noise) <= max_rec_length:
noise_start_idx = noise[0]
# Check if there is sufficient preceding clean signal for reconstruction
if noise_start_idx >= max_rec_length:
# Check if the preceding signal is clean
if set(range(
noise_start_idx - max_rec_length,
noise_start_idx)).issubset(clean_indices):
# Perform noise reconstruction for the current noise
reconstructed_noise = gan_rec(
sig[noise_start_idx-max_rec_length:noise_start_idx],
noise, sampling_rate, generator, device)
# Upsample the reconstructed noise
reconstructed_noise_res = resample(
reconstructed_noise,
int(len(reconstructed_noise)*UPSAMPLING_RATE))
# Upsample the clean signal before the noise
sig_before_noise_res = resample(
sig_scaled[:noise_start_idx],
int(len(sig_scaled[:noise_start_idx])*UPSAMPLING_RATE))
# Upsample the clean signal after the noise
sig_after_noise_res = resample(
sig_scaled[noise[-1]:],
int(len(sig_scaled[noise[-1]:])*UPSAMPLING_RATE))
# Find peaks in the clean signal before the noise
peaks_sig_before_noise, _ = find_peaks(
sig_before_noise_res,
int(sampling_rate*UPSAMPLING_RATE))
# Check if the reconstructed noise is long enough
# (considering a threshold of 2 seconds)
if len(reconstructed_noise_res) >= 2*sampling_rate*UPSAMPLING_RATE:
try:
# Find peaks in the reconstructed noise
peaks_noise_rec, _ = find_peaks(
reconstructed_noise_res,
int(sampling_rate*UPSAMPLING_RATE))
# Check if the clean signal after the noise is long enough
# (considering a threshold of 2 seconds)
if len(sig_after_noise_res) >= 2*sampling_rate*UPSAMPLING_RATE:
# Find peaks in the clean signal after the noise
peaks_sig_after_noise, _ = find_peaks(
sig_after_noise_res,
int(sampling_rate*UPSAMPLING_RATE))
# Merge the reconstructed noise with the clean signal
sig_res = list(sig_before_noise_res[:peaks_sig_before_noise[-1]]) + \
list(reconstructed_noise_res[peaks_noise_rec[0]:peaks_noise_rec[-1]]) + \
list(sig_after_noise_res[peaks_sig_after_noise[0]:])
# If the clean signal after the noise is too short, there is no need
# for peak detection
else:
# Merge the reconstructed noise with the clean signal
sig_res = list(sig_before_noise_res[:peaks_sig_before_noise[-1]]) + \
list(reconstructed_noise_res[peaks_noise_rec[0]:peaks_noise_rec[-1]]) + \
list(sig_after_noise_res)
except:
continue
else:
try:
# Check if the clean signal after the noise is long enough
# (considering a threshold of 2 seconds)
if len(sig_after_noise_res) >= 2*sampling_rate*UPSAMPLING_RATE:
# Find peaks in the clean signal after the noise
peaks_sig_after_noise, _ = find_peaks(
sig_after_noise_res,
int(sampling_rate*UPSAMPLING_RATE))
# Merge the reconstructed noise with the clean signal
sig_res = list(sig_before_noise_res[:peaks_sig_before_noise[-1]]) + \
list(reconstructed_noise_res) + \
list(sig_after_noise_res[peaks_sig_after_noise[0]:])
# If the clean signal after the noise is too short, there is no need
# for peak detection
else:
# Merge the reconstructed noise with the clean signal
sig_res = list(sig_before_noise_res[:peaks_sig_before_noise[-1]]) + \
list(reconstructed_noise_res) + \
list(sig_after_noise_res)
except:
continue
# Resample the reconstructed signal to the original length of the signal
sig_scaled = resample(sig_res, len(sig_scaled))
# Descale the reconstructed signal
ppg_descaled = (sig_scaled*np.std(sig)) + np.mean(sig)
# Set the reconstruction flag to True
reconstruction_flag = True
# Perform the signal quality assessment to ensure that the reconstructed
# signal is not distorted
clean_indices, noisy_indices = sqa(
sig=ppg_descaled, sampling_rate=sampling_rate, filter_signal=False)
# Check if there was a reconstruction
if reconstruction_flag:
ppg_signal = ppg_descaled
else:
ppg_signal = sig
# If resampling performed, update the reconstructed signal and indices according to the original sampling rate
if resampling_flag:
clean_indices = [int(index * resampling_rate) for index in clean_indices]
noisy_indices = [[int(index * resampling_rate) for index in noise] for noise in noisy_indices]
ppg_signal = resample_signal(
sig=ppg_signal, fs_origin=sampling_rate, fs_target=sampling_rate_original)
# Return the reconstructed or original PPG signal, along with updated indices
return ppg_signal, clean_indices, noisy_indices
# Path: ppg_clean_extraction.py
def clean_seg_extraction(
sig: np.ndarray,
noisy_indices: list,
window_length: int
) -> list:
"""
Scan the clean parts of the signal and extract clean segments based on the input window length.
Args:
sig (numpy.ndarray): Input PPG signal.
noisy_indices (list): List of noisy segment indices.
window_length (int): Desired window length for clean segment extraction in terms of samples.
Return:
clean_segments (list): List of clean PPG segments with the specified window length and their starting index.
"""
def find_clean_parts(quality_lst:list) -> list:
'''
Scan the quality vector and find the start and end indices of clean parts.
Args:
quality_lst (list): Quality vector of the signal (0 indicates clean and 1 indicates noisy)
Return:
start_end_clean (list): Start and end indices of the clean parts in a list of tuples
'''
start_end_clean = []
start = 0
for i in range(len(quality_lst)-1):
if quality_lst[start] == quality_lst[i+1]:
if i+1 == len(quality_lst)-1:
end = i+1
if quality_lst[start] == 0:
start_end_clean.append((start,end))
else:
continue
else:
end = i
if quality_lst[start] == 0:
start_end_clean.append((start,end))
start = i+1
return start_end_clean
# Create a new DataFrame to store PPG and quality information
quality_df = pd.DataFrame(columns=['ppg','quality'])
# Flatten the noise indices list
flat_list_noise = [item for noise in noisy_indices for item in noise]
# Define a quality vector (0 indicates clean and 1 indicates noisy)
quality = [1 if i in flat_list_noise else 0 for i in range(len(sig))]
# Store ppg signal with quality vector in dataframe
quality_df['quality'] = quality
quality_df['ppg'] = sig
# Find start and end indices of clean parts in the quality list
start_end_clean_idx = find_clean_parts(quality_df['quality'].tolist())
# Initialize a list to store total clean segments with the specified window length
clean_segments = []
# Extract clean segments based on window length
for indices in start_end_clean_idx:
# Check if the current clean part has the required window length
if (indices[1] - indices[0]) >= window_length:
# Select the current clean part
clean_part = quality_df['ppg'][indices[0] : indices[1]].tolist()
# Calculate the number of segments with the specified window length that can be extracted from the current clean part
num_segments = len(clean_part) // window_length
# Extract clean segment with the specified window length from current clean part and their starting indices
segments = [((indices[0] + i * window_length), clean_part[i * window_length: (i + 1) * window_length]) for i in range(num_segments)]
# Add extracted segments to total clean segments
clean_segments.extend(segments)
return clean_segments
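# A toy sketch of clean_seg_extraction on a synthetic 100-sample signal whose samples
# 40-59 are marked noisy; with window_length=20, each clean run (samples 0-38 and 60-98,
# since the stored end index is excluded by the slice) yields one 20-sample segment, so
# the returned segments start at indices 0 and 60.
import numpy as np

if __name__ == "__main__":
    sig = np.arange(100, dtype=float)
    noisy_indices = [list(range(40, 60))]
    segments = clean_seg_extraction(sig=sig, noisy_indices=noisy_indices, window_length=20)
    print([start for start, _ in segments])  # expected: [0, 60]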
# Path: ppg_peak_detection.py
def peak_detection(
clean_segments: list,
sampling_rate: int,
method: str ='kazemi') -> list:
'''
Detect peaks in clean PPG segments using specified peak detection method.
Args:
clean_segments (list): List of clean PPG segments with the specified window length and their starting index.
sampling_rate: Sampling rate of the PPG signal.
method (str): Peak detection method. Valid inputs: 'nk', 'kazemi', and 'heartpy'. The default is 'kazemi'. (optional)
Return:
total_peaks (list): List of lists, each containing the detected peaks for a corresponding clean segment.
References:
Kazemi method: Kazemi, K., Laitala, J., Azimi, I., Liljeberg, P., & Rahmani, A. M. (2022).
Robust ppg peak detection using dilated convolutional neural networks. Sensors, 22(16), 6054.
Neurokit method: Makowski, D., Pham, T., Lau, Z. J., Brammer, J. C., Lespinasse, F., Pham, H., ... & Chen, S. A. (2021).
NeuroKit2: A Python toolbox for neurophysiological signal processing. Behavior research methods, 1-8.
HeartPY method: Van Gent, P., Farah, H., Nes, N., & van Arem, B. (2018, June).
Heart rate analysis for human factors: Development and validation of an open source toolkit for noisy naturalistic heart rate data.
In Proceedings of the 6th HUMANIST Conference (pp. 173-178).
'''
# Initialize a list to store total peaks
total_peaks = []
# Check the desired peak detection method
if method == 'nk':
# Neurokit method
upsampling_rate = 2
sampling_rate_new = sampling_rate * upsampling_rate
for i in range(len(clean_segments)):
# Normalize PPG signal
ppg_normed = normalize_data(clean_segments[i][1])
# Upsampling the signal
resampled = signal.resample(ppg_normed, len(ppg_normed) * upsampling_rate)
# Perform peak detection
ppg_cleaned = nk.ppg_clean(resampled, sampling_rate=sampling_rate_new)
info = nk.ppg_findpeaks(ppg_cleaned, sampling_rate=sampling_rate_new)
peaks = info["PPG_Peaks"]
# Update peak indices according to the original sampling rate
peaks = (peaks // upsampling_rate).astype(int)
# Add peaks of the current segment to the total peaks
total_peaks.append(peaks)
# Return total peaks
return total_peaks
elif method == 'kazemi':
# Kazemi method
for i in range(len(clean_segments)):
# Perform peak detection
peaks = ppg_peaks(np.asarray(clean_segments[i][1]), sampling_rate, seconds = 15, overlap = 0, minlen = 15)
# Add peaks of the current segment to the total peaks
total_peaks.append(peaks)
# Return total peaks
return total_peaks
elif method == 'heartpy':
# HeartPy method
for i in range(len(clean_segments)):
# Perform peak detection
rol_mean = rolling_mean(clean_segments[i][1], windowsize = 0.75, sample_rate = sampling_rate)
wd = hp.peakdetection.detect_peaks(np.array(clean_segments[i][1]), rol_mean, ma_perc = 20, sample_rate = sampling_rate)
peaks = wd['peaklist']
# Add peaks of the current segment to the total peaks
total_peaks.append(peaks)
# Return total peaks
return total_peaks
else:
print("Invalid method. Please choose from 'neurokit', 'kazemi', or 'heartpy'")
return None
# Path: ppg_hrv_extraction.py
import warnings
import neurokit2 as nk
import pandas as pd
import numpy as np
from utils import get_data
from ppg_sqa import sqa
from ppg_reconstruction import reconstruction
from ppg_clean_extraction import clean_seg_extraction
from ppg_peak_detection import peak_detection
# -*- coding: utf-8 -*-
warnings.filterwarnings("ignore")
def hrv_parameters(
peaks: np.ndarray,
| seg_start_idx: int, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Antelcat/ida_copilot
# Path: ida_copilot/panel.py
class Singleton(type):
class CopilotPanel(idaapi.PluginForm, metaclass=Singleton):
class CopilotPanelCallbackManager(BaseCallbackHandler):
class ShowCopilotPanel(idaapi.action_handler_t):
def __call__(cls, *args, **kwargs):
def __init__(self):
def OnCreate(self, form):
def OnClose(self, form):
def Show(self, **kwargs):
def on_text(self, text: str, **kwargs):
def __init__(self, panel):
def activate(self, ctx):
def update(self, ctx):
# Path: ida_copilot/copilot.py
class Copilot:
def run(self, temperature=0.2, model='gpt-3.5-turbo-0613'):
ea = idaapi.get_screen_ea()
func_name = idaapi.get_func_name(ea)
tools = [
self.__GetAddressInfoTool(),
self.__GetDefinitionTool(),
self.__GetPseudocodeTool(),
self.__SetFunctionCommentTool(),
self.__SetFunctionDefinitionTool(),
self.__SetFunctionNameTool(),
self.__GetIsMyWorkDoneTool(ea)
]
agent = initialize_agent(
agent_type=AgentType.OPENAI_MULTI_FUNCTIONS,
llm=ChatOpenAI(temperature=temperature, model=model),
tools=tools,
# callback_manager=BaseCallbackManager(handlers=[
# CopilotPanelCallbackManager()]),
verbose=True,
)
prompt = prompts.default_prompt_zh.format(
binary_description=f'name: {func_name}, address 0x{ea:x}'
# pseudocode=pseudocode
)
# Start a new thread to run the agent
t = concurrent.futures.ThreadPoolExecutor()
loop = asyncio.get_event_loop()
loop.run_in_executor(t, agent.run, prompt)
class __GetAddressInfoTool(BaseTool):
name = 'get_address_info'
description = ('Given a hex address or function name, show its information. '
'**Input Format**: `<hex_address_or_function_name>`. '
'**Input Example1**: `sub_140007080`. '
'**Input Example2**: `0x140007080`.')
@staticmethod
def __get_address_info(name_or_hex_address: str):
try:
if name_or_hex_address.lower().startswith('0x'):
ea = int(name_or_hex_address, 16)
else:
ea = idaapi.get_name_ea(idaapi.BADADDR, name_or_hex_address)
if ea == idaapi.BADADDR:
raise Exception
except Exception:
return f'{name_or_hex_address} is not a valid address or name.'
flags = idaapi.get_flags(ea)
result = ''
# Check whether the address is inside a function
func = idaapi.get_func(ea)
if func:
result += "Address 0x%X is inside a function.\n" % ea
result += "Function start: 0x%X\n" % func.start_ea
result += "Function end: 0x%X\n" % func.end_ea
func_name = idaapi.get_func_name(func.start_ea)
if func_name:
result += "Function name: %s\n" % func_name
elif idaapi.is_code(flags):
result += "Address 0x%X is code.\n" % ea
elif idaapi.is_data(flags):
result += "Address 0x%X is data.\n" % ea
if idaapi.is_byte(flags):
result += "Data type: Byte\n"
result += "Value: %d\n" % idaapi.get_wide_byte(ea)
elif idaapi.is_word(flags):
result += "Data type: Word\n"
result += "Value: %d\n" % idaapi.get_wide_word(ea)
elif idaapi.is_dword(flags):
result += "Data type: Dword\n"
result += "Value: %d\n" % idaapi.get_wide_dword(ea)
elif idaapi.is_qword(flags):
result += "Data type: Qword\n"
result += "Value: %d\n" % idaapi.get_qword(ea)
elif idaapi.is_float(flags):
result += "Data type: Float\n"
# result += "Value: %f\n" % idaapi.get_wide_float(address)
elif idaapi.is_double(flags):
result += "Data type: Double\n"
# result += "Value: %f\n" % idaapi.get_wide_double(address)
elif idaapi.is_strlit(flags):
result += "Data type: String\n"
result += "Value: %s\n" % idaapi.get_strlit_contents(ea)
elif idaapi.is_struct(flags):
result += "Data type: Struct\n"
# ... other data type checks
elif idaapi.is_unknown(flags):
result += "Address 0x%X is unknown.\n" % ea
# Name and comment
if idaapi.has_name(flags):
result += "Name: %s\n" % idaapi.get_name(ea)
elif idaapi.has_dummy_name(flags):
result += "Dummy name: %s\n" % idaapi.get_name(ea)
if idaapi.has_cmt(flags):
result += "Comment: %s\n" % idaapi.get_cmt(ea, 0)
if result == '':
result = 'Address not found.'
elif result[-1] == '\n':
result = result[:-1]
return result
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> Any:
query = core.escape_agent_input(
query, 'get_address_info')
return core.pop_async_call_result(
idaapi.execute_sync(
lambda: core.push_async_call_result(self.__get_address_info(query)),
idaapi.MFF_WRITE))
class __GetDefinitionTool(BaseTool):
name = 'get_definition'
description = ('Given a function name, show its definition. '
'NOTICE that the result is decompiled by IDA, so it may NOT be accurate. '
'**Input Format**: `<function_name>`. '
'**Input Example**: `sub_140007080`.')
@staticmethod
def __get_definition(function_name: str):
try:
return core.decompile_by_name(function_name).definition
except Exception as e:
return f'Failed to decompile: {e}'
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> Any:
query = core.escape_agent_input(query, 'get_definition')
return core.pop_async_call_result(
idaapi.execute_sync(
lambda: core.push_async_call_result(self.__get_definition(query)),
idaapi.MFF_WRITE))
class __GetPseudocodeTool(BaseTool):
name = 'get_pseudocode'
description = ('Given a function name or hex address of a function, show its pseudocode. '
'NOTICE that the result is decompiled by IDA, so it may NOT be accurate. '
'**Input Format**: `<function_name_or_hex_address>`. '
'**Input Example1**: `sub_140007080`. '
'**Input Example2**: `0x140007080`.')
@staticmethod
def __get_pseudocode(function_name_or_hex_address: str):
try:
if function_name_or_hex_address.lower().startswith('0x'):
ea = int(function_name_or_hex_address, 16)
return core.decompile_by_ea(ea).pseudocode
return core.decompile_by_name(function_name_or_hex_address).pseudocode
except Exception as e:
return f'Failed to decompile: {e}'
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> Any:
query = core.escape_agent_input(
query, 'get_pseudocode')
return core.pop_async_call_result(
idaapi.execute_sync(
lambda: core.push_async_call_result(self.__get_pseudocode(query)),
idaapi.MFF_WRITE))
class __SetFunctionCommentTool(BaseTool):
name = 'set_function_comment'
description = ('Given a function name and a comment, set the comment of the function. '
'**Input Format**: `<function_name> <comment>`. '
'**Input Example**: `sub_140007080 Copilot Comment: This function is used to do something.`')
@staticmethod
def __set_function_comment(function_name_and_comment: str):
try:
func_name, comment = function_name_and_comment.split(' ', 1)
func_name = func_name.strip()
if not comment.startswith('Copilot Comment:'):
comment = 'Copilot Comment: ' + comment.strip()
core.decompile_by_name(func_name).comment = comment
return f'Successfully set comment of {func_name} to {comment}.'
except Exception as e:
return f'Failed to set comment: {e}'
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> Any:
query = core.escape_agent_input(
query, 'set_function_comment')
return core.pop_async_call_result(
idaapi.execute_sync(
lambda: core.push_async_call_result(self.__set_function_comment(query)),
idaapi.MFF_WRITE))
class __SetFunctionDefinitionTool(BaseTool):
name = 'set_function_definition'
description = ('Set definition of a function. '
'**Input Format**: `<return_type> [calling_convention] <function_name>(<param_type> [param_name], ...)`. '
'**Input Example1**: `void sub_140005048(int a1, unsigned long long a2)`. '
'**Input Example2**: `NTSTATUS __fastcall DriverIoControl(PDRIVER_OBJECT, PIRP)`.')
@staticmethod
def __set_function_definition(new_definition: str):
func_pattern = re.compile(
r'(?P<ret_type>[\w\s*]+?)\s*(?P<cc>__\w+\s+)?(?P<func_name>\w+)\((?P<params>.*)\)')
# param_pattern = re.compile(r'(\w+\s*\*?)\s*(\w+)')
try:
match = func_pattern.match(new_definition)
if not match:
return f'Invalid function definition, not match: {new_definition}'
result = match.groupdict()
return_type = result['ret_type'].strip() if result['ret_type'] else None
if not return_type:
return f'Invalid function definition, no return type: {new_definition}'
# The regex above misses one case:
# e.g. `NTSTATUSsub_140005048(PDRIVER_OBJECT driverObject, PIRP irp)`
# parses with `ret_type` = `N` and `func_name` = `TSTATUSsub_140005048`,
# so we treat this kind of input as invalid.
if ' ' not in new_definition[:new_definition.index('(')]:
return f'Invalid function definition, no func name: {new_definition}'
func_name = result['func_name'].strip()
core.decompile_by_name(func_name).definition = new_definition
return f'Successfully set definition of {func_name} to {new_definition}.'
except Exception as e:
return f'Failed to set definition: {e}'
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> Any:
query = core.escape_agent_input(
query, 'set_function_definition')
return core.pop_async_call_result(
idaapi.execute_sync(
lambda: core.push_async_call_result(self.__set_function_definition(query)),
idaapi.MFF_WRITE))
class __SetFunctionNameTool(BaseTool):
name = 'set_function_name'
description = ('Given a function name, rename it. '
'**Input Format**: <old_name> <new_name>. '
'**Input Example**: sub_140007080 DeviceIoControl.')
@staticmethod
def __set_function_name(old_name_and_new_name: str):
try:
old_name, new_name = old_name_and_new_name.split(' ')
old_name = old_name.strip()
core.decompile_by_name(old_name).name = new_name
return f'Successfully renamed {old_name} to {new_name}.'
except Exception as e:
return f'Failed to set function name: {e}'
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> Any:
return core.pop_async_call_result(
idaapi.execute_sync(
lambda: core.push_async_call_result(self.__set_function_name(query)),
idaapi.MFF_WRITE))
class __GetIsMyWorkDoneTool(BaseTool):
name = 'get_is_my_work_done'
description = ('Given a function name, return whether the work is done. '
'Also return tips if not done.')
func: Optional[core.DecompiledFunction] = None
def __init__(self, current_func_ea, **kwargs: Any):
super().__init__(**kwargs)
self.func = core.decompile_by_ea(current_func_ea)
def __get_is_my_work_done(self):
try:
for function in self.func.functions:
ea = function['ea']
func_name = idaapi.get_func_name(ea)
if func_name.startswith('sub_'):
return (f'No, function `{func_name}` at 0x{ea:x} is not renamed yet. Please continue your work. '
f'REMEMBER, your goal is to rename all functions that start with `sub_`. '
f'AND, you are analyzing function `{self.func.name}`.')
return f'Yes, function `{self.func.name}` is fully analyzed.'
except Exception as e:
return f'Failed to get is my work done: {e}'
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> Any:
return core.pop_async_call_result(
idaapi.execute_sync(
lambda: core.push_async_call_result(self.__get_is_my_work_done()),
idaapi.MFF_WRITE))
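# A standalone sketch of the definition-parsing regex used by __set_function_definition,
# runnable without IDA; the sample definitions come from the tool's own description string
# and from the malformed-input case discussed in the comments above.
import re

if __name__ == "__main__":
    func_pattern = re.compile(
        r'(?P<ret_type>[\w\s*]+?)\s*(?P<cc>__\w+\s+)?(?P<func_name>\w+)\((?P<params>.*)\)')
    ok = func_pattern.match('NTSTATUS __fastcall DriverIoControl(PDRIVER_OBJECT, PIRP)')
    print(ok.groupdict())
    # {'ret_type': 'NTSTATUS', 'cc': '__fastcall ', 'func_name': 'DriverIoControl',
    #  'params': 'PDRIVER_OBJECT, PIRP'}
    bad = func_pattern.match('NTSTATUSsub_140005048(PDRIVER_OBJECT driverObject, PIRP irp)')
    print(bad.group('ret_type'), bad.group('func_name'))  # 'N' 'TSTATUSsub_140005048'
    # which is why the code also rejects definitions with no space before the '('.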
# Path: ida_copilot.py
import ida_hexrays
import ida_kernwin
import idaapi
from ida_copilot import panel
from ida_copilot.copilot import Copilot
class CopilotPluginActionHandler(idaapi.action_handler_t):
def __init__(self):
super(CopilotPluginActionHandler, self).__init__()
def activate(self, ctx):
ida_kernwin.show_wait_box('HIDECANCEL\nRunning Copilot...')
try:
Copilot().run()
finally:
ida_kernwin.hide_wait_box()
ida_hexrays.get_widget_vdui(ctx.widget).refresh_view(True)
ida_kernwin.refresh_idaview_anyway()
def on_task_complete(self, future):
# Close the progress bar / status message
ida_kernwin.hide_wait_box()
# Update the UI...
ida_kernwin.refresh_idaview_anyway()
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
class CopilotPlugin(idaapi.plugin_t):
flags = idaapi.PLUGIN_UNL
comment = "Copilot"
help = "Copilot"
wanted_name = "Copilot"
wanted_hotkey = ""
def init(self):
if not ida_hexrays.init_hexrays_plugin():
print("Hex-Rays decompiler is not available!")
return
run_action = idaapi.action_desc_t(
'copilot:run',
'Run Copilot',
CopilotPluginActionHandler(),
'Ctrl+Shift+P',
'Analyze the current function with Copilot',
-1)
idaapi.register_action(run_action)
idaapi.attach_action_to_menu(
'Edit/Copilot',
'copilot:run',
idaapi.SETMENU_APP)
action_desc = idaapi.action_desc_t(
'copilot:show_panel',
'Show Copilot',
panel.ShowCopilotPanel(panel.CopilotPanel()),
None,
'Copilot integration',
0
)
idaapi.register_action(action_desc)
# Add the menu item
idaapi.attach_action_to_menu(
'Windows/Copilot',
'copilot:show_panel',
idaapi.SETMENU_APP)
return idaapi.PLUGIN_KEEP
def run(self, arg):
idaapi.require('ida_copilot')
print('Copilot reloaded')
def term(self):
idaapi.detach_action_from_menu(
'Edit/Copilot',
'copilot:run')
idaapi.unregister_action('copilot:run')
idaapi.detach_action_from_menu(
'Windows/Copilot',
'copilot:show_panel')
| idaapi.unregister_action('copilot:show_panel') |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: aws-samples/amazon-location-geospatial-agent
# Path: geospatial_agent/agent/action_summarizer/action_summarizer.py
class ActionSummarizer:
"""Action summarizer acts on raw user messages with the following traits
1. It is a geospatial query or analysis such as "Draw me a heatmap".
2. Has URLS of data to be used for the analysis.
ActionSummarizer generates a list of ActionSummary.
"""
def __init__(self, llm=None):
if llm is None:
claude_v2 = get_claude_v2()
self.llm = claude_v2
else:
self.llm = llm
def invoke(self, user_input: str, session_id: str, storage_mode: str) -> ActionSummary:
try:
action_context = self._extract_action_context(user_input)
dispatcher.send(signal=SIGNAL_ACTION_CONTEXT_GENERATED,
sender=SENDER_ACTION_SUMMARIZER,
event_data=AgentSignal(
event_type=EventType.Message,
event_source=SENDER_ACTION_SUMMARIZER,
event_message=f'Detected desired action {action_context.action}. And file paths: {action_context.file_paths}.'
))
read_file_code = self._gen_file_read_code(action_context, session_id, storage_mode)
dispatcher.send(signal=SIGNAL_FILE_READ_CODE_GENERATED,
sender=SENDER_ACTION_SUMMARIZER,
event_data=AgentSignal(
event_type=EventType.PythonCode,
event_source=SENDER_ACTION_SUMMARIZER,
event_message=f'Generated code to read and understand data schema.',
event_data=read_file_code
))
data_files_summary = self._gen_file_summaries_from_executing_code(read_file_code)
dispatcher.send(signal=SIGNAL_FILE_READ_CODE_EXECUTED,
sender=SENDER_ACTION_SUMMARIZER,
event_data=AgentSignal(
event_type=EventType.Message,
event_source=SENDER_ACTION_SUMMARIZER,
event_message=f'Successfully executed code to read and understand data schema.',
))
file_summaries = self._gen_file_summaries_for_action(action_context.action, data_files_summary)
return ActionSummary(action=action_context.action, file_summaries=file_summaries)
except Exception as e:
if isinstance(e, ActionSummarizerException):
raise e
else:
raise ActionSummarizerException(
message=f"Failed to extract dataframes from data reading code. Original exception: {e}") from e
def _gen_file_summaries_for_action(self, action: str, file_summaries: List[FileSummary]) -> List[FileSummary]:
for item in file_summaries:
requirements_str = "\n".join(
[f"{index + 1}. {requirement}" for index, requirement in enumerate(_DATA_SUMMARY_REQUIREMENTS)])
file_summary_template: PromptTemplate = PromptTemplate.from_template(_DATA_SUMMARY_PROMPT)
gdf_str = item.data_frame.to_json()
if len(gdf_str) > 4000:
gdf_str = gdf_str[:4000]
chain = LLMChain(llm=self.llm, prompt=file_summary_template)
file_summary = chain.run(
role_intro=_ROLE_INTRO,
human_role=HUMAN_ROLE,
requirements=requirements_str,
action=action,
columns=item.column_names,
table=gdf_str,
assistant_role=ASSISTANT_ROLE,
stop=[HUMAN_STOP_SEQUENCE]
).strip()
item.file_summary = file_summary
return file_summaries
def _gen_file_read_code(self, action_context: ActionContext, session_id: str, storage_mode: str) -> str:
file_paths = action_context.file_paths
file_urls_str = "\n".join(
[f"{index + 1}. {file_url}" for index, file_url in enumerate(file_paths)])
requirements_str = "\n".join(
[f"{index + 1}. {requirement}" for index, requirement in enumerate(_READ_FILE_REQUIREMENTS)])
read_file_template: PromptTemplate = PromptTemplate.from_template(_READ_FILE_PROMPT)
chain = LLMChain(llm=self.llm, prompt=read_file_template)
read_file_code_response = chain.run(
role_intro=_ROLE_INTRO,
human_role=HUMAN_ROLE,
requirements=requirements_str,
session_id=session_id,
storage_mode=storage_mode,
assistant_role=ASSISTANT_ROLE,
file_urls=file_urls_str,
stop=[HUMAN_STOP_SEQUENCE]
).strip()
read_file_code = extract_code(read_file_code_response)
return read_file_code
@staticmethod
def _gen_file_summaries_from_executing_code(code: str) -> List[FileSummary]:
assembled_code = f'{get_shim_imports()}\n{code}'
output, _globals = execute_assembled_code(assembled_code)
dataframes = _globals[DATA_FRAMES_VARIABLE_NAME]
file_summaries = [FileSummary(**data) for data in dataframes]
if len(file_summaries) == 0:
raise ActionSummarizerException(
message=f"Failed to generate file summaries from executing code. "
f"No dataframes found in globals")
for item in file_summaries:
if not isinstance(item.file_url, str):
raise ActionSummarizerException(
message=f"Failed to generate file summaries from executing code. "
f"Found {type(item.file_url)} instead of str")
if not isinstance(item.column_names, list):
raise ActionSummarizerException(
message=f"Failed to generate file summaries from executing code. "
f"Found {type(item.column_names)} instead of list")
return file_summaries
def _extract_action_context(self, user_input: str) -> ActionContext:
filepaths_extract_template: PromptTemplate = PromptTemplate.from_template(_ACTION_SUMMARY_PROMPT)
requirements_str = "\n".join(
[f"{index + 1}. {requirement}" for index, requirement in enumerate(_ACTION_SUMMARY_REQUIREMENTS)])
chain = LLMChain(llm=self.llm, prompt=filepaths_extract_template)
action_summary = chain.run(
role_intro=_ROLE_INTRO,
human_role=HUMAN_ROLE,
requirements=requirements_str,
assistant_role=ASSISTANT_ROLE,
message=user_input,
stop=[HUMAN_STOP_SEQUENCE]
).strip()
try:
action_summary_obj = ActionContext.parse_raw(action_summary)
return action_summary_obj
except json.JSONDecodeError as e:
raise ValueError("Invalid JSON format.") from e
# Path: geospatial_agent/agent/action_summarizer/action_summarizer.py
class ActionSummary(BaseModel):
action: str
file_summaries: List[FileSummary]
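# A minimal sketch of constructing an ActionSummary directly, mirroring how the unit tests
# in tests/test_gis_work_tool.py build one; an empty file_summaries list is valid input.
if __name__ == "__main__":
    summary = ActionSummary(action="The user wants to draw a heatmap", file_summaries=[])
    print(summary.action, len(summary.file_summaries))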
# Path: geospatial_agent/agent/geo_chat/tools/gis_work_tool.py
def gis_work_tool(session_id: str, storage_mode: str, action_summarizer=None, gis_agent=None):
desc = f"""\
A tool that invokes a {GeospatialAgent.__name__} if the user action is requires geospatial analysis to be done on user provided data.
{GeospatialAgent.__name__} description: {GeospatialAgent.__doc__}
It accepts two inputs: user_input and session_id.
An example query might look like the following:
Draw time series choropleth map of weather temperature change over major cities of the world.
Data Locations:
1. Climate Change: Earth Surface Temperature Data location since 1940s data location: GlobalLandTemperaturesByCity.csv
A qualified action for the tool has the following requirements:
1. A geospatial analysis action such as heatmap, choropleth, or time series.
2. A data location such as a scheme://URI or just a file name such as data.csv.
DO NOT invoke this tool unless both of these requirements are met.
This tool will invoke the GIS agent to perform the geospatial analysis on the data.
The return is a freeform string or a URL to the result of the analysis."""
if action_summarizer is None:
action_summarizer = ActionSummarizer()
if gis_agent is None:
gis_agent = GeospatialAgent(storage_mode=storage_mode)
def gis_work_tool_func(user_input: str):
action_summary = action_summarizer.invoke(
user_input=user_input, session_id=session_id, storage_mode=storage_mode)
output = gis_agent.invoke(action_summary=action_summary, session_id=session_id)
return (f"Observation: GIS Agent has completed it's work. I should list the generated code file path, and "
f"generated visualization file path from the code output, if applicable."
f"Generated code path = {output.assembled_code_file_path}. "
f"Generated code output = {output.assembled_code_output}.")
return Tool.from_function(func=gis_work_tool_func, name=GIS_WORK_TOOL, description=desc)
# Path: geospatial_agent/agent/geospatial/agent.py
class GeospatialAgent:
"""A geospatial data scientist and a python developer agent written by Amazon Location Service."""
_assembled_code_file_name = "assembled_code.py"
def __init__(self, storage_mode: str):
claude_v2 = get_claude_v2()
self.llm = claude_v2
self.local_storage = LocalStorage()
self.storage_mode = storage_mode
def invoke(self, action_summary: ActionSummary, session_id: str) -> GISAgentResponse:
try:
# INFO: Generating a task name from the action summary action
task_name = gen_task_name(self.llm, action_summary.action)
dispatcher.send(signal=SIGNAL_TASK_NAME_GENERATED,
sender=SENDER_GEOSPATIAL_AGENT,
event_data=AgentSignal(
event_source=SENDER_GEOSPATIAL_AGENT,
event_message=f"I will use task name {task_name} to gather all generated artifacts.",
))
data_locations_instructions = self._get_data_locations_instructions(action_summary)
# INFO: Generating the graph plan to write code
graph_plan_code = gen_plan_graph(self.llm,
task_definition=action_summary.action,
data_locations_instructions=data_locations_instructions)
dispatcher.send(
signal=SIGNAL_GRAPH_CODE_GENERATED,
sender=SENDER_GEOSPATIAL_AGENT,
event_data=AgentSignal(
event_source=SENDER_GEOSPATIAL_AGENT,
event_message=f'Generated plan graph code.',
event_type=EventType.PythonCode,
event_data=graph_plan_code
))
# INFO: Executing the graph plan code and get the graph object and the repl output
graph, repl_output = self._execute_plan_graph_code(graph_plan_code)
graph_file_abs_path = self._write_local_graph_file(graph, session_id=session_id, task_name=task_name)
solver = Solver(
llm=self.llm,
graph=graph,
graph_code=graph_plan_code,
session_id=session_id,
storage_mode=self.storage_mode,
task_definition=action_summary.action,
task_name=task_name,
data_locations_instructions=data_locations_instructions)
op_defs = solver.solve()
assembled_code = solver.assemble()
dispatcher.send(signal=SIGNAL_ASSEMBLED_CODE_EXECUTING,
sender=SENDER_GEOSPATIAL_AGENT,
event_data=AgentSignal(
event_source=SENDER_GEOSPATIAL_AGENT,
event_message="Saving and executing assembled code",
))
code_file_abs_path = self._write_local_code_file(assembled_code=assembled_code, session_id=session_id,
task_name=task_name)
code_output, _ = execute_assembled_code(assembled_code)
if code_output is not None:
dispatcher.send(signal=SIGNAL_ASSEMBLED_CODE_EXECUTED,
sender=SENDER_GEOSPATIAL_AGENT,
event_data=AgentSignal(
event_source=SENDER_GEOSPATIAL_AGENT,
event_message=code_output
))
return GISAgentResponse(
graph_plan_code=graph_plan_code,
graph=graph,
repl_output=repl_output,
op_defs=op_defs,
assembled_code=assembled_code,
assembled_code_output=code_output,
assembled_code_file_path=code_file_abs_path,
)
except Exception as e:
raise GISAgentException(message="Error occurred while executing the graph plan code") from e
@staticmethod
def _get_data_locations_instructions(action_summary):
# Generating a string for all the data locations from action_summary
# For each file in action_summary.file_summaries, we will generate a string of:
# "File Location: <file_url>",
# "Column Names: <column_names>",
# "Summary: <file_summary>"
# We will then join these strings with a new line character and return it.
# We will also add a new line character at the end of the string.
data_locations_instructions = ""
for file_summary in action_summary.file_summaries:
instr = ""
instr += f"File Location: {file_summary.file_url}\n"
instr += f"Column Names: {file_summary.column_names}\n"
instr += f"Summary: {file_summary.file_summary}\n"
data_locations_instructions += instr
return data_locations_instructions
def _write_local_graph_file(self, graph, session_id: str, task_name: str) -> str:
graph_file_path = self.local_storage.get_generated_file_url(
file_path="plan_graph.graphml", session_id=session_id, task_name=task_name)
parent_dir = os.path.dirname(graph_file_path)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
networkx.write_graphml(graph, graph_file_path, named_key_ids=False)
return os.path.abspath(graph_file_path)
def _write_local_code_file(self, session_id: str, assembled_code: str, task_name: str):
return self.local_storage.write_file(
file_name=self._assembled_code_file_name,
session_id=session_id,
task_name=task_name,
content=assembled_code
)
@staticmethod
def _execute_plan_graph_code(graph_plan_code) -> tuple[networkx.DiGraph, str]:
"""Returns the plan graph object by executing the graph plan code."""
output, _globals = execute_assembled_code(graph_plan_code)
graph: networkx.DiGraph = _globals['G']
return graph, output
# Path: tests/test_gis_work_tool.py
from unittest.mock import Mock
from assertpy import assert_that
from langchain.tools import Tool
from geospatial_agent.agent.action_summarizer.action_summarizer import ActionSummarizer, ActionSummary
from geospatial_agent.agent.geo_chat.tools.gis_work_tool import gis_work_tool
from geospatial_agent.agent.geospatial.agent import GeospatialAgent
def test_initializing_gis_work_tool_does_not_raise_error():
# Create a mock ActionSummarizer object
mock_action_summarizer = Mock(spec=ActionSummarizer)
# Create a mock GeospatialAgent object
mock_geospatial_agent = Mock(spec=GeospatialAgent)
tool = gis_work_tool(
session_id='test-session-id',
action_summarizer=mock_action_summarizer,
gis_agent=mock_geospatial_agent,
storage_mode='test-storage-mode'
)
assert_that(tool).is_not_none()
assert_that(tool).is_instance_of(Tool)
def test_using_gis_work_tool_does_not_raise_error():
mock_action_summarizer = ActionSummarizer
mock_action_summarizer.invoke = Mock(
return_value=ActionSummary(
action="The user wants to draw a heatmap",
file_summaries=[]
))
mock_gis_agent = GeospatialAgent
| mock_gis_agent.invoke = Mock(return_value=None) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Rishit-dagli/Astroformer
# Path: pytorch-image-models/timm/models/_builder.py
def build_model_with_cfg(
model_cls: Callable,
variant: str,
pretrained: bool,
pretrained_cfg: Optional[Dict] = None,
pretrained_cfg_overlay: Optional[Dict] = None,
model_cfg: Optional[Any] = None,
feature_cfg: Optional[Dict] = None,
pretrained_strict: bool = True,
pretrained_filter_fn: Optional[Callable] = None,
kwargs_filter: Optional[Tuple[str]] = None,
**kwargs,
):
""" Build model with specified default_cfg and optional model_cfg
This helper fn aids in the construction of a model including:
* handling default_cfg and associated pretrained weight loading
* passing through optional model_cfg for models with config based arch spec
* features_only model adaptation
* pruning config / model adaptation
Args:
model_cls (nn.Module): model class
variant (str): model variant name
pretrained (bool): load pretrained weights
pretrained_cfg (dict): model's pretrained weight/task config
model_cfg (Optional[Dict]): model's architecture config
feature_cfg (Optional[Dict]: feature extraction adapter config
pretrained_strict (bool): load pretrained weights strictly
pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights
kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model
**kwargs: model args passed through to model __init__
"""
pruned = kwargs.pop('pruned', False)
features = False
feature_cfg = feature_cfg or {}
# resolve and update model pretrained config and model kwargs
pretrained_cfg = resolve_pretrained_cfg(
variant,
pretrained_cfg=pretrained_cfg,
pretrained_cfg_overlay=pretrained_cfg_overlay
)
# FIXME converting back to dict, PretrainedCfg use should be propagated further, but not into model
pretrained_cfg = pretrained_cfg.to_dict()
_update_default_kwargs(pretrained_cfg, kwargs, kwargs_filter)
# Setup for feature extraction wrapper done at end of this fn
if kwargs.pop('features_only', False):
features = True
feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4))
if 'out_indices' in kwargs:
feature_cfg['out_indices'] = kwargs.pop('out_indices')
# Instantiate the model
if model_cfg is None:
model = model_cls(**kwargs)
else:
model = model_cls(cfg=model_cfg, **kwargs)
model.pretrained_cfg = pretrained_cfg
model.default_cfg = model.pretrained_cfg # alias for backwards compat
if pruned:
model = adapt_model_from_file(model, variant)
# For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats
num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000))
if pretrained:
load_pretrained(
model,
pretrained_cfg=pretrained_cfg,
num_classes=num_classes_pretrained,
in_chans=kwargs.get('in_chans', 3),
filter_fn=pretrained_filter_fn,
strict=pretrained_strict,
)
# Wrap the model in a feature extraction module if enabled
if features:
feature_cls = FeatureListNet
output_fmt = getattr(model, 'output_fmt', None)
if output_fmt is not None:
feature_cfg.setdefault('output_fmt', output_fmt)
if 'feature_cls' in feature_cfg:
feature_cls = feature_cfg.pop('feature_cls')
if isinstance(feature_cls, str):
feature_cls = feature_cls.lower()
if 'hook' in feature_cls:
feature_cls = FeatureHookNet
elif feature_cls == 'fx':
feature_cls = FeatureGraphNet
else:
assert False, f'Unknown feature class {feature_cls}'
model = feature_cls(model, **feature_cfg)
model.pretrained_cfg = pretrained_cfg_for_features(pretrained_cfg) # add back pretrained cfg
model.default_cfg = model.pretrained_cfg # alias for rename backwards compat (default_cfg -> pretrained_cfg)
return model
# Path: pytorch-image-models/timm/models/_features_fx.py
def register_notrace_function(func: Callable):
"""
Decorator for functions which ought not to be traced through
"""
_autowrap_functions.add(func)
return func
# Path: pytorch-image-models/timm/models/_manipulate.py
def checkpoint_seq(
functions,
x,
every=1,
flatten=False,
skip_last=False,
preserve_rng_state=True
):
r"""A helper function for checkpointing sequential models.
Sequential models execute a list of modules/functions in order
(sequentially). Therefore, we can divide such a sequence into segments
and checkpoint each segment. All segments except the last run in :func:`torch.no_grad`
manner, i.e., not storing the intermediate activations. The inputs of each
checkpointed segment will be saved for re-running the segment in the backward pass.
See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works.
.. warning::
Checkpointing currently only supports :func:`torch.autograd.backward`
and only if its `inputs` argument is not passed. :func:`torch.autograd.grad`
is not supported.
.. warning:
At least one of the inputs needs to have :code:`requires_grad=True` if
grads are needed for model inputs, otherwise the checkpointed part of the
model won't have gradients.
Args:
functions: A :class:`torch.nn.Sequential` or the list of modules or functions to run sequentially.
x: A Tensor that is input to :attr:`functions`
every: checkpoint every-n functions (default: 1)
flatten (bool): flatten nn.Sequential of nn.Sequentials
skip_last (bool): skip checkpointing the last function in the sequence if True
preserve_rng_state (bool, optional, default=True): Omit stashing and restoring
the RNG state during each checkpoint.
Returns:
Output of running :attr:`functions` sequentially on :attr:`*inputs`
Example:
>>> model = nn.Sequential(...)
>>> input_var = checkpoint_seq(model, input_var, every=2)
"""
def run_function(start, end, functions):
def forward(_x):
for j in range(start, end + 1):
_x = functions[j](_x)
return _x
return forward
if isinstance(functions, torch.nn.Sequential):
functions = functions.children()
if flatten:
functions = chain.from_iterable(functions)
if not isinstance(functions, (tuple, list)):
functions = tuple(functions)
num_checkpointed = len(functions)
if skip_last:
num_checkpointed -= 1
end = -1
for start in range(0, num_checkpointed, every):
end = min(start + every - 1, num_checkpointed - 1)
x = checkpoint(run_function(start, end, functions), x, preserve_rng_state=preserve_rng_state)
if skip_last:
return run_function(end + 1, len(functions) - 1, functions)(x)
return x
# Path: pytorch-image-models/timm/models/_manipulate.py
def named_apply(
fn: Callable,
module: nn.Module, name='',
depth_first: bool = True,
include_root: bool = False,
) -> nn.Module:
if not depth_first and include_root:
fn(module=module, name=name)
for child_name, child_module in module.named_children():
child_name = '.'.join((name, child_name)) if name else child_name
named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)
if depth_first and include_root:
fn(module=module, name=name)
return module
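# A small runnable sketch of named_apply, assuming torch is installed: it walks the
# module's children depth-first and calls fn(module=..., name=...) on each submodule,
# skipping the root unless include_root=True.
import torch.nn as nn

if __name__ == "__main__":
    net = nn.Sequential(nn.Linear(4, 8), nn.ReLU())
    seen = []
    named_apply(lambda module, name: seen.append((name, type(module).__name__)), net)
    print(seen)  # [('0', 'Linear'), ('1', 'ReLU')]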
# Path: pytorch-image-models/timm/models/_registry.py
def generate_default_cfgs(cfgs: Dict[str, Union[Dict[str, Any], PretrainedCfg]]):
out = defaultdict(DefaultCfg)
default_set = set() # no tag and tags ending with * are prioritized as default
for k, v in cfgs.items():
if isinstance(v, dict):
v = PretrainedCfg(**v)
has_weights = v.has_weights
model, tag = split_model_name_tag(k)
is_default_set = model in default_set
priority = (has_weights and not tag) or (tag.endswith('*') and not is_default_set)
tag = tag.strip('*')
default_cfg = out[model]
if priority:
default_cfg.tags.appendleft(tag)
default_set.add(model)
elif has_weights and not default_cfg.is_pretrained:
default_cfg.tags.appendleft(tag)
else:
default_cfg.tags.append(tag)
if has_weights:
default_cfg.is_pretrained = True
default_cfg.cfgs[tag] = v
return out
# Path: pytorch-image-models/timm/models/_registry.py
def register_model(fn: Callable[..., Any]) -> Callable[..., Any]:
# lookup containing module
mod = sys.modules[fn.__module__]
module_name_split = fn.__module__.split('.')
module_name = module_name_split[-1] if len(module_name_split) else ''
# add model to __all__ in module
model_name = fn.__name__
if hasattr(mod, '__all__'):
mod.__all__.append(model_name)
else:
mod.__all__ = [model_name] # type: ignore
# add entries to registry dict/sets
_model_entrypoints[model_name] = fn
_model_to_module[model_name] = module_name
_module_to_models[module_name].add(model_name)
if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs:
# this will catch all models that have entrypoint matching cfg key, but miss any aliasing
# entrypoints or non-matching combos
default_cfg = mod.default_cfgs[model_name]
if not isinstance(default_cfg, DefaultCfg):
# new style default cfg dataclass w/ multiple entries per model-arch
assert isinstance(default_cfg, dict)
# old style cfg dict per model-arch
pretrained_cfg = PretrainedCfg(**default_cfg)
default_cfg = DefaultCfg(tags=deque(['']), cfgs={'': pretrained_cfg})
for tag_idx, tag in enumerate(default_cfg.tags):
is_default = tag_idx == 0
pretrained_cfg = default_cfg.cfgs[tag]
model_name_tag = '.'.join([model_name, tag]) if tag else model_name
replace_items = dict(architecture=model_name, tag=tag if tag else None)
if pretrained_cfg.hf_hub_id and pretrained_cfg.hf_hub_id == 'timm/':
# auto-complete hub name w/ architecture.tag
replace_items['hf_hub_id'] = pretrained_cfg.hf_hub_id + model_name_tag
pretrained_cfg = replace(pretrained_cfg, **replace_items)
if is_default:
_model_pretrained_cfgs[model_name] = pretrained_cfg
if pretrained_cfg.has_weights:
# add tagless entry if it's default and has weights
_model_has_pretrained.add(model_name)
if tag:
_model_pretrained_cfgs[model_name_tag] = pretrained_cfg
if pretrained_cfg.has_weights:
# add model w/ tag if tag is valid
_model_has_pretrained.add(model_name_tag)
_model_with_tags[model_name].append(model_name_tag)
else:
_model_with_tags[model_name].append(model_name) # has empty tag (to slowly remove these instances)
_model_default_cfgs[model_name] = default_cfg
return fn
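# A sketch of the usual way @register_model is used: decorating an entrypoint function
# makes the model creatable by name through timm.create_model. 'toy_net' and its body are
# hypothetical; real entrypoints build the full architecture and usually go through
# build_model_with_cfg. Assumes an installed timm whose registry backs both calls.
import timm
import torch.nn as nn

@register_model
def toy_net(pretrained=False, **kwargs):
    # A deliberately tiny stand-in model used only to illustrate registration.
    return nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.AdaptiveAvgPool2d(1))

if __name__ == "__main__":
    model = timm.create_model("toy_net")
    print(sum(p.numel() for p in model.parameters()))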
# Path: pytorch-image-models/timm/models/astroformer.py
import sys
import math
import torch
from collections import OrderedDict
from dataclasses import dataclass, field, replace
from functools import partial
from typing import Callable, List, Optional, Tuple, Union
from timm.layers import (
ClassifierHead,
ConvMlp,
DropPath,
LayerNorm,
Mlp,
NormMlpClassifierHead,
RelPosBias,
RelPosBiasTf,
RelPosMlp,
_assert,
create_attn,
create_conv2d,
create_pool2d,
extend_tuple,
get_act_layer,
get_norm_act_layer,
get_norm_layer,
make_divisible,
resize_rel_pos_bias_table,
to_2tuple,
trunc_normal_tf_,
use_fused_attn,
)
from timm.models.registry import register_model
from torch import nn
from torch.jit import Final
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._manipulate import checkpoint_seq, named_apply
from ._registry import generate_default_cfgs, register_model
# This code is heavily based on https://github.com/huggingface/pytorch-image-models
sys.path.append("../../../")
@dataclass
class MaxxVitTransformerCfg:
dim_head: int = 32
head_first: bool = True # head ordering in qkv channel dim
expand_ratio: float = 4.0
expand_first: bool = True
shortcut_bias: bool = True
attn_bias: bool = True
attn_drop: float = 0.0
proj_drop: float = 0.0
pool_type: str = "avg2"
rel_pos_type: str = "bias"
rel_pos_dim: int = 512 # for relative position types w/ MLP
partition_ratio: int = 32
window_size: Optional[Tuple[int, int]] = None
grid_size: Optional[Tuple[int, int]] = None
no_block_attn: bool = (
False # disable window block attention for maxvit (ie only grid)
)
use_nchw_attn: bool = (
False # for MaxViT variants (not used for CoAt), keep tensors in NCHW order
)
init_values: Optional[float] = None
act_layer: str = "gelu"
norm_layer: str = "layernorm2d"
norm_layer_cl: str = "layernorm"
norm_eps: float = 1e-6
def __post_init__(self):
if self.grid_size is not None:
self.grid_size = to_2tuple(self.grid_size)
if self.window_size is not None:
self.window_size = to_2tuple(self.window_size)
if self.grid_size is None:
self.grid_size = self.window_size
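# A small sketch of what __post_init__ normalizes, assuming this module's imports
# (notably timm's to_2tuple) are available: scalar window sizes become 2-tuples and
# grid_size falls back to window_size when it is not given.
if __name__ == "__main__":
    cfg = MaxxVitTransformerCfg(window_size=7)
    print(cfg.window_size, cfg.grid_size)  # (7, 7) (7, 7)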
@dataclass
class MaxxVitConvCfg:
block_type: str = "mbconv"
expand_ratio: float = 4.0
expand_output: bool = (
True # calculate expansion channels from output (vs input chs)
)
kernel_size: int = 3
group_size: int = 1 # 1 == depthwise
pre_norm_act: bool = False # activation after pre-norm
output_bias: bool = True # bias for shortcut + final 1x1 projection conv
stride_mode: str = "dw" # stride done via one of 'pool', '1x1', 'dw'
pool_type: str = "avg2"
downsample_pool_type: str = "avg2"
padding: str = ""
attn_early: bool = (
False # apply attn between conv2 and norm2, instead of after norm2
)
attn_layer: str = "se"
attn_act_layer: str = "silu"
attn_ratio: float = 0.25
init_values: Optional[float] = 1e-6 # for ConvNeXt block, ignored by MBConv
act_layer: str = "gelu"
norm_layer: str = ""
norm_layer_cl: str = ""
norm_eps: Optional[float] = None
def __post_init__(self):
# mbconv vs convnext blocks have different defaults, set in post_init to avoid explicit config args
assert self.block_type in ("mbconv", "convnext")
use_mbconv = self.block_type == "mbconv"
if not self.norm_layer:
self.norm_layer = "batchnorm2d" if use_mbconv else "layernorm2d"
if not self.norm_layer_cl and not use_mbconv:
self.norm_layer_cl = "layernorm"
| if self.norm_eps is None: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AdFiFi/D-FaST
# Path: config.py
def init_model_config(args, data_config: DataConfig):
if args.model == "BNT":
model_config = BNTConfig(node_size=data_config.node_size,
sizes=(data_config.node_size, data_config.node_size // 2),
num_classes=data_config.num_class,
pooling=(False, True),
pos_encoding=None, # identity, none
orthogonal=True,
# freeze_center=True,
freeze_center=False,
project_assignment=True,
num_heads=args.num_heads,
pos_embed_dim=data_config.node_size,
dim_feedforward=1024,
)
model = BNT(model_config)
elif args.model == "FBNetGen":
model_config = FBNetGenConfig(activation='gelu',
dropout=0.5,
# extractor_type='gru', # gru or cnn
extractor_type='cnn', # gru or cnn
# d_model=16,
d_model=40,
node_size=data_config.node_size,
node_feature_size=data_config.node_feature_size,
time_series_size=data_config.time_series_size,
num_classes=data_config.num_class,
window_size=5,
# window_size=40,
# window_size=50,
cnn_pool_size=16,
graph_generation='product', # product or linear
num_gru_layers=4,
group_loss=True,
sparsity_loss=True,
sparsity_loss_weight=1.0e-4)
model = FBNetGen(model_config)
elif args.model == 'BrainNetCNN':
model_config = BrainNetCNNConfig(node_size=data_config.node_size,
num_classes=data_config.num_class)
model = BrainNetCNN(model_config)
elif args.model == 'STAGIN':
model_config = STAGINConfig(node_size=data_config.node_size,
num_classes=data_config.num_class,
d_model=args.d_model,
num_layers=args.num_layers,
window_size=args.window_size,
window_stride=args.window_stride,
dynamic_length=args.dynamic_length,
sampling_init=args.sampling_init)
model = STAGIN(model_config)
elif args.model == "Transformer":
model_config = TransformerConfig(node_size=data_config.node_size,
num_classes=data_config.num_class,
node_feature_size=data_config.node_feature_size,
readout='concat',
num_layers=args.num_layers)
model = Transformer(model_config)
elif args.model == "EEGNet":
model_config = EEGNetConfig(node_size=data_config.node_size,
time_series_size=data_config.time_series_size,
node_feature_size=data_config.node_feature_size,
num_classes=data_config.num_class,
frequency=args.frequency,
D=args.D,
num_kernels=args.num_kernels,
p1=args.p1,
p2=args.p2,
dropout=args.dropout)
model_config.class_weight = data_config.class_weight
model = EEGNet(model_config)
elif args.model == "DFaST":
model_config = DFaSTConfig(node_size=data_config.node_size,
time_series_size=data_config.time_series_size,
node_feature_size=data_config.node_feature_size,
num_classes=data_config.num_class,
sparsity=args.sparsity,
frequency=args.frequency,
D=args.D,
p1=args.p1,
p2=args.p2,
k=args.k,
num_kernels=args.num_kernels,
d_model=args.d_model,
window_size=args.window_size,
window_stride=args.window_stride,
dynamic_length=args.dynamic_length,
num_heads=args.num_heads,
dim_feedforward=args.dim_feedforward,
num_spatial_layers=args.num_layers,
num_node_temporal_layers=args.num_node_temporal_layers,
num_graph_temporal_layers=args.num_graph_temporal_layers,
attention_depth=args.attention_depth,
activation=args.activation,
dropout=args.dropout,
# distill=(False, ) + (args.num_layers - 1) *
# ((True,) if args.distill else (False,)),
distill=args.num_layers * ((True,) if args.distill else (False,)),
initializer=args.initializer,
label_smoothing=args.epsilon_ls
)
model_config.class_weight = data_config.class_weight
model = DFaSTForClassification(model_config)
elif args.model == "DFaSTOnlySpatial":
model_config = DFaSTConfig(node_size=data_config.node_size,
time_series_size=data_config.time_series_size,
node_feature_size=data_config.node_feature_size,
num_classes=data_config.num_class,
sparsity=args.sparsity,
frequency=args.frequency,
D=args.D,
p1=args.p1,
p2=args.p2,
k=args.k,
num_kernels=args.num_kernels,
d_model=args.d_model,
window_size=args.window_size,
window_stride=args.window_stride,
dynamic_length=args.dynamic_length,
num_heads=args.num_heads,
dim_feedforward=args.dim_feedforward,
num_spatial_layers=args.num_layers,
num_node_temporal_layers=args.num_node_temporal_layers,
num_graph_temporal_layers=args.num_graph_temporal_layers,
attention_depth=args.attention_depth,
activation=args.activation,
dropout=args.dropout,
# distill=(False, ) + (args.num_layers - 1) *
# ((True,) if args.distill else (False,)),
distill=args.num_layers * ((True,) if args.distill else (False,)),
initializer=args.initializer,
label_smoothing=args.epsilon_ls
)
model_config.class_weight = data_config.class_weight
model = DFaSTOnlySpatialForClassification(model_config)
elif args.model == "LMDA":
model_config = LMDAConfig(node_size=data_config.node_size,
time_series_size=data_config.time_series_size,
node_feature_size=data_config.node_feature_size,
num_classes=data_config.num_class,
depth=9,
channel_depth1=args.num_kernels,
channel_depth2=9,
ave_depth=1,
avepool=5
)
model_config.class_weight = data_config.class_weight
model = LMDA(model_config)
elif args.model == "ShallowConvNet":
model_config = ShallowConvNetConfig(node_size=data_config.node_size,
time_series_size=data_config.time_series_size,
node_feature_size=data_config.node_feature_size,
num_classes=data_config.num_class,
num_kernels=args.num_kernels
)
model_config.class_weight = data_config.class_weight
model = ShallowConvNet(model_config)
elif args.model == "DeepConvNet":
model_config = DeepConvNetConfig(node_size=data_config.node_size,
time_series_size=data_config.time_series_size,
node_feature_size=data_config.node_feature_size,
num_classes=data_config.num_class,
num_kernels=25
)
model_config.class_weight = data_config.class_weight
model = DeepConvNet(model_config)
elif args.model == "RACNN":
model_config = RACNNConfig(node_size=data_config.node_size,
time_series_size=data_config.time_series_size,
node_feature_size=data_config.node_feature_size,
num_classes=data_config.num_class,
k=args.k
)
model_config.class_weight = data_config.class_weight
model = RACNN(model_config)
elif args.model == "EEGChannelNet":
model_config = EEGChannelNetConfig(node_size=data_config.node_size,
time_series_size=data_config.time_series_size,
node_feature_size=data_config.node_feature_size,
num_classes=data_config.num_class
)
model_config.class_weight = data_config.class_weight
model = EEGChannelNet(model_config)
elif args.model == "TCANet":
model_config = TCANetConfig(node_size=data_config.node_size,
time_series_size=data_config.time_series_size,
node_feature_size=data_config.node_feature_size,
num_classes=data_config.num_class
)
model_config.class_weight = data_config.class_weight
model = TCANet(model_config)
elif args.model == "TCACNet":
model_config = TCACNetConfig(node_size=data_config.node_size,
time_series_size=data_config.time_series_size,
node_feature_size=data_config.node_feature_size,
num_classes=data_config.num_class
)
model_config.class_weight = data_config.class_weight
model = TCACNet(model_config)
elif args.model == "SBLEST":
model_config = SBLESTConfig(node_size=data_config.node_size,
time_series_size=data_config.time_series_size,
node_feature_size=data_config.node_feature_size,
num_classes=data_config.num_class
)
model_config.class_weight = data_config.class_weight
model = SBLEST(model_config)
else:
model = None
model_config = None
if model is not None:
init_parameters(model, model_config)
return model, model_config
# Path: utils/optimizer.py
def init_optimizer(model: torch.nn.Module, optimizer_config=None) -> torch.optim.Optimizer:
parameters = {
'lr': optimizer_config.learning_rate,
'weight_decay': optimizer_config.weight_decay
}
if optimizer_config.no_weight_decay:
params, _ = get_param_group_no_wd(model,
match_rule=optimizer_config.match_rule,
except_rule=optimizer_config.except_rule)
else:
params = list(model.parameters())
logging.info(f'Parameters [normal] length [{len(params)}]')
parameters['params'] = params
optimizer_type = optimizer_config.optimizer
if optimizer_type == 'SGD':
parameters['momentum'] = optimizer_config.momentum
parameters['nesterov'] = optimizer_config.nesterov
return getattr(torch.optim, optimizer_type)(**parameters)
# Path: utils/schedule.py
def init_schedule(optimizer, args, t_total):
if args.schedule == 'cos':
schedule = CosineAnnealingLR(optimizer, eta_min=args.target_learning_rate, T_max=t_total)
elif args.schedule == 'cos_w':
schedule = get_cosine_annealing_schedule_with_warmup(optimizer, eta_max=args.learning_rate,
eta_min=args.target_learning_rate,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total)
elif args.schedule == 'linear':
schedule = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
num_training_steps=t_total)
elif args.schedule == 'one_cycle':
schedule = OneCycleLR(optimizer,
max_lr=args.max_learning_rate,
epochs=args.num_epochs,
steps_per_epoch=t_total // args.num_epochs,
pct_start=0.2,
div_factor=args.max_learning_rate/args.learning_rate,
final_div_factor=1000)
else:
schedule = None
return schedule
# Path: utils/accuracy.py
def accuracy(output: torch.Tensor, target: torch.Tensor, top_k=(1,)) -> List[float]:
"""Computes the precision@k for the specified values of k"""
max_k = max(top_k)
batch_size = target.size(0)
_, predict = output.topk(max_k, 1, True, True)
predict = predict.t()
correct = predict.eq(target.view(1, -1).expand_as(predict))
res = []
for k in top_k:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size).item())
return res
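As a quick aside, the accuracy helper above expects raw logits of shape [batch, classes] and integer class targets of shape [batch]. A tiny usage with made-up values (requires torch and the function defined above):

import torch

logits = torch.tensor([[0.1, 0.7, 0.2],
                       [0.6, 0.1, 0.3]])
target = torch.tensor([1, 2])
# the first sample is correct at top-1, the second only falls inside the top-2
print(accuracy(logits, target, top_k=(1, 2)))  # [50.0, 100.0]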
# Path: utils/trainer.py
import json
import os
import wandb
import logging
import torch
import numpy as np
from timeit import default_timer as timer
from abc import abstractmethod
from torch.nn import functional as F
from tqdm import tqdm
from sklearn.metrics import roc_auc_score, accuracy_score
from sklearn.metrics import precision_recall_fscore_support, classification_report
from config import init_model_config
from .optimizer import init_optimizer
from .schedule import init_schedule
from .accuracy import accuracy
from data import *
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Trainer(object):
def __init__(self, args, local_rank=0, task_id=0, subject_id=0):
self.task_id = task_id
self.args = args
self.local_rank = local_rank
self.subject_id = subject_id
self.data_config = DataConfig(args)
self.data_loaders = self.load_datasets()
model, self.model_config = init_model_config(args, self.data_config)
if args.do_parallel:
# self.model = torch.nn.DataParallel(self.model)
self.device = f'cuda:{self.local_rank}' \
if args.device != 'cpu' and torch.cuda.is_available() else args.device
self.model = model.to(args.device)
self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self.local_rank],
find_unused_parameters=True)
else:
self.device = f'cuda' \
if args.device != 'cpu' and torch.cuda.is_available() else args.device
self.model = model.to(args.device)
# self.model = torch.compile(model, dynamic=True)
self.optimizer = None
self.scheduler = None
self.best_result = None
self.test_result = None
@abstractmethod
def prepare_inputs_kwargs(self, inputs):
return {}
def load_datasets(self):
# datasets = eval(
# f"load_{self.args.dataset}_data")(self.data_config)
datasets = eval(
f"{self.args.dataset}Dataset")(self.data_config, k=self.task_id, subject_id=self.subject_id)
if self.args.do_parallel:
data_loaders = init_distributed_dataloader(self.data_config, datasets)
else:
data_loaders = init_StratifiedKFold_dataloader(self.data_config, datasets)
return data_loaders
def init_components(self):
total = self.args.num_epochs * len(self.data_loaders['train'])
self.optimizer = init_optimizer(self.model, self.args)
self.scheduler = init_schedule(self.optimizer, self.args, total)
def train_epoch(self):
train_dataloader = self.data_loaders['train']
self.model.train()
losses = 0
loss_list = []
for step, inputs in enumerate(train_dataloader):
# with torch.autograd.set_detect_anomaly(True):
input_kwargs = self.prepare_inputs_kwargs(inputs)
outputs = self.model(**input_kwargs)
loss = outputs.loss
if self.data_config.dataset == "ZuCo":
loss.backward()
if step % self.data_config.batch_size == self.data_config.batch_size - 1:
self.optimizer.step()
self.scheduler.step() # Update learning rate schedule
self.optimizer.zero_grad()
else:
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.scheduler.step() # Update learning rate schedule
losses += loss.item()
loss_list.append(loss.item())
wandb.log({'Training loss': loss.item(),
'Learning rate': self.optimizer.param_groups[0]['lr']})
return losses / len(loss_list)
def train(self):
total = self.args.num_epochs*len(self.data_loaders['train'])
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(self.data_loaders['train']))
logger.info(" Num Epochs = %d", self.args.num_epochs)
logger.info(" Total train batch size = %d", self.args.batch_size)
logger.info(" warmup steps = %d", self.args.warmup_steps)
logger.info(" Total optimization steps = %d", total)
logger.info(" Save steps = %d", self.args.save_steps)
self.init_components()
if self.args.visualize:
self.visualize()
for epoch in tqdm(range(1, self.args.num_epochs + 1), desc="epoch", ncols=0):
start_time = timer()
train_loss = self.train_epoch()
end_time = timer()
self.data_config.alpha = self.data_config.beta = \
            | 0.5 * (self.args.num_epochs - epoch) / self.args.num_epochs + 0.5 |
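The predicted continuation above makes data_config.alpha and data_config.beta decay linearly from just under 1.0 at the first epoch down to 0.5 at the last. A tiny stand-alone illustration of that schedule (num_epochs=5 is an arbitrary value chosen only for the printout):

num_epochs = 5  # illustrative only
for epoch in range(1, num_epochs + 1):
    value = 0.5 * (num_epochs - epoch) / num_epochs + 0.5
    print(epoch, round(value, 2))
# prints: 1 0.9, 2 0.8, 3 0.7, 4 0.6, 5 0.5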
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: YihePang/DisoFLAG
# Path: args.py
class Args_config:
def __init__(self):
self.use_gpu = True
self.max_seq_length = 128
self.feature_dim = 1024
self.encoder_hidden = 512
self.decoder_hidden = 1024
self.decoder_dropout = 0.3
self.model_path = './saved_model'
self.epochs = 50
self.batch_size = 16
self.learning_rate = 0.00005
# Path: prepare_model_data.py
def data_2_samples(args, data_file_name, is_slice):
seq_id,seq,seq_label_IDP,seq_label_F1,seq_label_F2,seq_label_F3,seq_label_F4,seq_label_F5,seq_label_F6,seq_T5_feature = file_2_data(data_file_name)
    # label processing
res_mask_0 = residue_mask(seq_label_IDP)
res_mask_1 = residue_mask(seq_label_F1)
res_mask_2 = residue_mask(seq_label_F2)
res_mask_3 = residue_mask(seq_label_F3)
res_mask_4 = residue_mask(seq_label_F4)
res_mask_5 = residue_mask(seq_label_F5)
res_mask_6 = residue_mask(seq_label_F6)
seq_mask = sequence_mask(seq) #
seq_label_0 = lable_2_value(seq_label_IDP)
seq_label_1 = lable_2_value(seq_label_F1)
seq_label_2 = lable_2_value(seq_label_F2)
seq_label_3 = lable_2_value(seq_label_F3)
seq_label_4 = lable_2_value(seq_label_F4)
seq_label_5 = lable_2_value(seq_label_F5)
seq_label_6 = lable_2_value(seq_label_F6)
if is_slice == True:
seq_id,seq,seq_label_0,seq_label_1,seq_label_2,seq_label_3,seq_label_4,seq_label_5,seq_label_6,seq_T5_feature,res_mask_0,res_mask_1,res_mask_2,res_mask_3,res_mask_4,res_mask_5,res_mask_6,seq_mask = slice_data(seq_id,seq,seq_label_0,seq_label_1,seq_label_2,seq_label_3,seq_label_4,seq_label_5,seq_label_6,
seq_T5_feature,
res_mask_0,res_mask_1,res_mask_2,res_mask_3,res_mask_4,res_mask_5,res_mask_6,
seq_mask,args.max_seq_length)
# print("after slice lengths: ",len(seq_id))
# padding
pad_seq_label_0 = seq_lable_padding(seq_label_0, args.max_seq_length)
pad_seq_label_1 = seq_lable_padding(seq_label_1, args.max_seq_length)
pad_seq_label_2 = seq_lable_padding(seq_label_2, args.max_seq_length)
pad_seq_label_3 = seq_lable_padding(seq_label_3, args.max_seq_length)
pad_seq_label_4 = seq_lable_padding(seq_label_4, args.max_seq_length)
pad_seq_label_5 = seq_lable_padding(seq_label_5, args.max_seq_length)
pad_seq_label_6 = seq_lable_padding(seq_label_6, args.max_seq_length)
pad_seq_T5_feature = seq_feature_padding(seq_T5_feature, args.max_seq_length)
pad_res_mask_0 = mask_padding(res_mask_0,args.max_seq_length)
pad_res_mask_1 = mask_padding(res_mask_1,args.max_seq_length)
pad_res_mask_2 = mask_padding(res_mask_2,args.max_seq_length)
pad_res_mask_3 = mask_padding(res_mask_3,args.max_seq_length)
pad_res_mask_4 = mask_padding(res_mask_4,args.max_seq_length)
pad_res_mask_5 = mask_padding(res_mask_5,args.max_seq_length)
pad_res_mask_6 = mask_padding(res_mask_6,args.max_seq_length)
pad_seq_mask = mask_padding(seq_mask,args.max_seq_length)
data_samples = []
for i in range(len(seq_id)):
one_sample = []
one_sample.append(seq_id[i])
one_sample.append(seq[i])
# label
one_sample.append(pad_seq_label_0[i]) # (padding)-----------------------2 IDP
one_sample.append(pad_seq_label_1[i]) # (padding)-----------------------3 PB
one_sample.append(pad_seq_label_2[i]) # (padding)-----------------------4 DB
one_sample.append(pad_seq_label_3[i]) # (padding)-----------------------5 RB
one_sample.append(pad_seq_label_4[i]) # (padding)-----------------------6 IB
one_sample.append(pad_seq_label_5[i]) # (padding)-----------------------7 LB
one_sample.append(pad_seq_label_6[i]) # (padding)-----------------------8 Link
# length
one_sample.append(len(seq[i])) # -----------------------------9
one_sample.append(pad_seq_T5_feature[i]) # (padding)--------------------10
# one_sample.append(pad_seq_BERT_feature[i]) # (padding)------------------11
# one_sample.append(pad_seq_IDP_feature[i]) # (padding)-------------------12
one_sample.append(seq_label_0[i]) # ---------13
one_sample.append(seq_label_1[i]) # ---------14
one_sample.append(seq_label_2[i]) # ---------15
one_sample.append(seq_label_3[i]) # ---------16
one_sample.append(seq_label_4[i]) # ---------17
one_sample.append(seq_label_5[i]) # ---------18
one_sample.append(seq_label_6[i]) # ---------19
# mask
one_sample.append(pad_res_mask_0[i]) #0,1 mask----------------------20
one_sample.append(pad_res_mask_1[i]) #0,1 mask----------------------21
one_sample.append(pad_res_mask_2[i]) #0,1 mask----------------------22
one_sample.append(pad_res_mask_3[i]) #0,1 mask----------------------23
one_sample.append(pad_res_mask_4[i]) #0,1 mask----------------------24
one_sample.append(pad_res_mask_5[i]) #0,1 mask----------------------25
one_sample.append(pad_res_mask_6[i]) #0,1 mask----------------------26
one_sample.append(pad_seq_mask[i]) #seq -----------------------27
data_samples.append(one_sample)
return data_samples
# Path: prepare_model_data.py
def Batches_data(data_samples, batch_size, is_train): #all data samples
# if is_train == True:
# random.shuffle(data_samples)
batches = []
data_len = len(data_samples)
batch_nums = int(data_len/batch_size)
def genNextSamples():
for i in range(0, batch_nums*batch_size, batch_size):
yield data_samples[i: i + batch_size]
if data_len % batch_size != 0:
last_num = data_len - batch_nums*batch_size
up_num = batch_size - last_num
l1 = data_samples[batch_nums*batch_size : data_len]
l2 = data_samples[0: up_num]
yield l1+l2
for one_data_samples in genNextSamples():
one_batch = one_batch_data(one_data_samples)
batches.append(one_batch)
return batches
# Path: load_data.py
def load_file_2_data(file_path):
loadfile = open(file_path,"r")
load_f = []
for line in loadfile:
line=line.strip('\n')
load_f.append(line)
loadfile.close()
load_data = []
for i in range(len(load_f)):
if i % 2 == 0:
load_data.append(load_f[i:i+2]) #one data: [0]--id [1]--seq
# print("load_file: ",file_path," data length: ",len(load_data))
return load_data
# Path: model.py
class Seq2FUN(nn.Module):
def __init__(self,args):
super().__init__()
self.model_name = 'Model'
self.args = args
self.encoder = Encoder(self.args)
self.decoder = Decoder(self.args)
self.Graph_decoder = Graph_decoder(self.args)
self.IDR_trans = nn.Linear(self.args.decoder_hidden, self.args.decoder_hidden)
self.PB_trans = nn.Linear(self.args.decoder_hidden, self.args.decoder_hidden)
self.DB_trans = nn.Linear(self.args.decoder_hidden, self.args.decoder_hidden)
self.RB_trans = nn.Linear(self.args.decoder_hidden, self.args.decoder_hidden)
self.IB_trans = nn.Linear(self.args.decoder_hidden, self.args.decoder_hidden)
self.LB_trans = nn.Linear(self.args.decoder_hidden, self.args.decoder_hidden)
self.Link_trans = nn.Linear(self.args.decoder_hidden, self.args.decoder_hidden)
self.IDP_cal_prob = nn.Linear(in_features=128, out_features=1, bias =True)
self.PB_cal_prob = nn.Linear(in_features=128, out_features=1, bias =True)
self.DB_cal_prob = nn.Linear(in_features=128, out_features=1, bias =True)
self.RB_cal_prob = nn.Linear(in_features=128, out_features=1, bias =True)
self.IB_cal_prob = nn.Linear(in_features=128, out_features=1, bias =True)
self.LB_cal_prob = nn.Linear(in_features=128, out_features=1, bias =True)
self.Link_cal_prob = nn.Linear(in_features=128, out_features=1, bias =True)
self.activate = nn.Sigmoid()
def forward(self, input_feature):
# print("input_feature:",input_feature.shape) # [B, L, 1024]
# Bi-GRU Encoder
encoder_outputs, encoder_hiddens = self.encoder(input_feature)
# Decoder_attention
decoder_outputs = self.decoder(encoder_outputs, encoder_hiddens) # [B, L, 1024]
# IDR feature
IDR_vec = self.IDR_trans(decoder_outputs) # [B, L, 1024]
PB_vec = self.PB_trans(decoder_outputs)
DB_vec = self.DB_trans(decoder_outputs)
RB_vec = self.RB_trans(decoder_outputs)
IB_vec = self.IB_trans(decoder_outputs)
LB_vec = self.LB_trans(decoder_outputs)
Link_vec = self.Link_trans(decoder_outputs)
# Gragh decoder
IDR_F_vec, PB_F_vec, DB_F_vec, RB_F_vec, IB_F_vec, LB_F_vec, Link_F_vec, Graph_Ws, Graph_bs, Graph_adjs = self.Graph_decoder(IDR_vec, PB_vec, DB_vec, RB_vec, IB_vec, LB_vec, Link_vec)
# cal_probs
IDR_probs = t.squeeze(self.activate(self.IDP_cal_prob(IDR_F_vec))) # [B, L]
PB_probs = t.squeeze(self.activate(self.PB_cal_prob(PB_F_vec))) # [B, L]
DB_probs = t.squeeze(self.activate(self.DB_cal_prob(DB_F_vec))) # [B, L]
RB_probs = t.squeeze(self.activate(self.RB_cal_prob(RB_F_vec))) # [B, L]
IB_probs = t.squeeze(self.activate(self.IB_cal_prob(IB_F_vec))) # [B, L]
LB_probs = t.squeeze(self.activate(self.LB_cal_prob(LB_F_vec))) # [B, L]
Link_probs = t.squeeze(self.activate(self.Link_cal_prob(Link_F_vec))) # [B, L]
return IDR_probs, PB_probs, DB_probs, RB_probs, IB_probs, LB_probs, Link_probs
# Path: evaluator.py
def write_2_file(data_file, data_samples, data_batches, IDR_probs, PB_probs, DB_probs, RB_probs, IB_probs, LB_probs, Link_probs, file_name,output_type):
batch_size = np.array(data_batches[0].seq_label_0).shape[0]
max_length = np.array(data_batches[0].seq_label_0).shape[1]
slice_length = len(data_samples)
pred_logs_0 = []
pred_logs_1 = []
pred_logs_2 = []
pred_logs_3 = []
pred_logs_4 = []
pred_logs_5 = []
pred_logs_6 = []
for b in range(len(IDR_probs)):
# IDR
pred_logs_0 += list(IDR_probs[b])
# PB
pred_logs_1 += list(PB_probs[b])
# DB
pred_logs_2 += list(DB_probs[b])
# RB
pred_logs_3 += list(RB_probs[b])
# IB
pred_logs_4 += list(IB_probs[b])
# LB
pred_logs_5 += list(LB_probs[b])
# Link
pred_logs_6 += list(Link_probs[b])
pred_logs_0 = pred_logs_0[:slice_length]
pred_logs_1 = pred_logs_1[:slice_length]
pred_logs_2 = pred_logs_2[:slice_length]
pred_logs_3 = pred_logs_3[:slice_length]
pred_logs_4 = pred_logs_4[:slice_length]
pred_logs_5 = pred_logs_5[:slice_length]
pred_logs_6 = pred_logs_6[:slice_length]
pred_seq_ids = []
for d in range(len(data_batches)):
batch_data = data_batches[d]
for i in range(len(batch_data.seq_id)): #[batch_size]
pred_seq_ids.append(str(batch_data.seq_id[i]).replace('\r','')) # pred_seq_ids
pred_seq_ids = pred_seq_ids[:slice_length]
org_ids = list(set(pred_seq_ids))
org_seq_pred_0 = []
org_seq_pred_1 = []
org_seq_pred_2 = []
org_seq_pred_3 = []
org_seq_pred_4 = []
org_seq_pred_5 = []
org_seq_pred_6 = []
for i in range(len(org_ids)):
find_id = org_ids[i]
one_pred_0 = []
one_pred_1 = []
one_pred_2 = []
one_pred_3 = []
one_pred_4 = []
one_pred_5 = []
one_pred_6 = []
for j in range(len(pred_seq_ids)):
if pred_seq_ids[j] == find_id:
one_pred_0 += list(pred_logs_0[j])
one_pred_1 += list(pred_logs_1[j])
one_pred_2 += list(pred_logs_2[j])
one_pred_3 += list(pred_logs_3[j])
one_pred_4 += list(pred_logs_4[j])
one_pred_5 += list(pred_logs_5[j])
one_pred_6 += list(pred_logs_6[j])
org_seq_pred_0.append([find_id,one_pred_0])
org_seq_pred_1.append([find_id,one_pred_1])
org_seq_pred_2.append([find_id,one_pred_2])
org_seq_pred_3.append([find_id,one_pred_3])
org_seq_pred_4.append([find_id,one_pred_4])
org_seq_pred_5.append([find_id,one_pred_5])
org_seq_pred_6.append([find_id,one_pred_6])
pred_final_ordered_0 = []
pred_final_ordered_1 = []
pred_final_ordered_2 = []
pred_final_ordered_3 = []
pred_final_ordered_4 = []
pred_final_ordered_5 = []
pred_final_ordered_6 = []
for i in range(len(data_file)):
find_id = str(str(data_file[i][0]).replace('>','')).replace('\r','')
for j in range(len(org_seq_pred_0)):
if org_seq_pred_0[j][0] == find_id:
pred_final_ordered_0.append(org_seq_pred_0[j][-1][:len(data_file[i][1])])
pred_final_ordered_1.append(org_seq_pred_1[j][-1][:len(data_file[i][1])])
pred_final_ordered_2.append(org_seq_pred_2[j][-1][:len(data_file[i][1])])
pred_final_ordered_3.append(org_seq_pred_3[j][-1][:len(data_file[i][1])])
pred_final_ordered_4.append(org_seq_pred_4[j][-1][:len(data_file[i][1])])
pred_final_ordered_5.append(org_seq_pred_5[j][-1][:len(data_file[i][1])])
pred_final_ordered_6.append(org_seq_pred_6[j][-1][:len(data_file[i][1])])
write_file = open(file_name,"w")
for i in range(len(data_file)):
write_file.write(data_file[i][0]+'\n')
write_file.write(data_file[i][1]+'\n')
one_seq_len = len(data_file[i][1].replace('\r',''))
pred_0 = [round(j,4) for j in pred_final_ordered_0[i]]
pred_1 = [round(j,4) for j in pred_final_ordered_1[i]]
pred_2 = [round(j,4) for j in pred_final_ordered_2[i]]
pred_3 = [round(j,4) for j in pred_final_ordered_3[i]]
pred_4 = [round(j,4) for j in pred_final_ordered_4[i]]
pred_5 = [round(j,4) for j in pred_final_ordered_5[i]]
pred_6 = [round(j,4) for j in pred_final_ordered_6[i]]
pred_0 = pred_0[0:one_seq_len]
pred_1 = pred_1[0:one_seq_len]
pred_2 = pred_2[0:one_seq_len]
pred_3 = pred_3[0:one_seq_len]
pred_4 = pred_4[0:one_seq_len]
pred_5 = pred_5[0:one_seq_len]
pred_6 = pred_6[0:one_seq_len]
if output_type == 'b':
# best ROC performance
pred_0 = [1 if p > 0.2340 else 0 for p in pred_0]
pred_1 = [1 if p > 0.1678 else 0 for p in pred_1]
pred_2 = [1 if p > 0.0163 else 0 for p in pred_2]
pred_3 = [1 if p > 0.006 else 0 for p in pred_3]
pred_4 = [1 if p > 0.0011 else 0 for p in pred_4]
pred_5 = [1 if p > 0.0109 else 0 for p in pred_5]
pred_6 = [1 if p > 0.0254 else 0 for p in pred_6]
write_file.write("".join(str(j) for j in pred_0))
write_file.write('\n')
write_file.write("".join(str(j) for j in pred_1))
write_file.write('\n')
write_file.write("".join(str(j) for j in pred_2))
write_file.write('\n')
write_file.write("".join(str(j) for j in pred_3))
write_file.write('\n')
write_file.write("".join(str(j) for j in pred_4))
write_file.write('\n')
write_file.write("".join(str(j) for j in pred_5))
write_file.write('\n')
write_file.write("".join(str(j) for j in pred_6))
write_file.write('\n')
else:
write_file.write(",".join(str(j) for j in pred_0))
write_file.write('\n')
write_file.write(",".join(str(j) for j in pred_1))
write_file.write('\n')
write_file.write(",".join(str(j) for j in pred_2))
write_file.write('\n')
write_file.write(",".join(str(j) for j in pred_3))
write_file.write('\n')
write_file.write(",".join(str(j) for j in pred_4))
write_file.write('\n')
write_file.write(",".join(str(j) for j in pred_5))
write_file.write('\n')
write_file.write(",".join(str(j) for j in pred_6))
write_file.write('\n')
print("Find results : ",file_name)
write_file.close()
# Path: model_running.py
import numpy as np
import random
import os
import torch as t
import sys
from args import Args_config
from prepare_model_data import data_2_samples, Batches_data
from load_data import load_file_2_data
from torch import nn
from model import Seq2FUN
from evaluator import write_2_file
# -*- coding: utf-8 -*-
# @Author: Yihe Pang
# @Date: 2023-06-13 10:08:51
# @Last Modified by: Yihe Pang
# @Last Modified time: 2023-06-14 22:43:30
def FLAG_model_running(input_data_file, output_file_name, output_type):
args = Args_config()
test_data = data_2_samples(args = args,
data_file_name = input_data_file,
is_slice = True)
for root, dirs, files in os.walk(args.model_path):
for one_file in files:
model_file = args.model_path+'/'+one_file
# print("model_file:",model_file)
| model = t.load(model_file, map_location='cpu') |
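The predicted line deserializes each checkpoint under the saved_model directory with torch.load. As rough orientation only, here is a minimal sketch of the load-and-infer pattern it implies; the dummy input shape follows Args_config above (batch 16, max_seq_length 128, feature_dim 1024), and nothing else here is taken from the repository's real inference loop.

import os
import torch as t

model_dir = "./saved_model"          # matches Args_config.model_path above
for root, dirs, files in os.walk(model_dir):
    for one_file in files:
        model = t.load(os.path.join(root, one_file), map_location="cpu")
        model.eval()
        with t.no_grad():
            dummy = t.randn(16, 128, 1024)   # (batch, max_seq_length, feature_dim)
            probs = model(dummy)             # Seq2FUN returns seven per-residue probability tensors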
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: BouncyKoishi/ChuCaoQi-Bot
# Path: plugins/scBattle/scBattleObj.py
class Battle:
def __init__(self, creatorId, groupId) -> None:
self.creatorId = creatorId
self.joinerId = None
self.creator: Battler or None = None
self.joiner: Battler or None = None
self.lastTurnInfoImg = None
self.groupId = groupId
self.gameRound = None
self.stateCardId = 0
self.spellCardSettled = []
self.turnInfoMsg, self.creatorCardMsg, self.joinerCardMsg = "", "", ""
self.cAtk, self.jAtk, self.cHurt, self.jHurt = 0, 0, 0, 0
async def setCreator(self):
creatorName = await getBattlerName(self.creatorId, self.groupId)
self.creator = Battler(self.creatorId, creatorName)
async def joinBattle(self, joinerId) -> None:
self.joinerId = joinerId
joinerName = await getBattlerName(self.joinerId, self.groupId)
self.joiner = Battler(self.joinerId, joinerName)
self.creator.setEnemy(self.joiner)
self.joiner.setEnemy(self.creator)
self.gameRound = 0
async def setSingleBattleEnemy(self, enemyName, enemyCardList):
self.joinerId = -1
self.joiner = Battler(self.joinerId, enemyName)
self.joiner.cardIdList = enemyCardList
self.creator.setEnemy(self.joiner)
self.joiner.setEnemy(self.creator)
self.spellCardSettled.append(self.joinerId)
self.gameRound = 0
def gameStart(self) -> None:
self.creator.setNewMainCard()
self.joiner.setNewMainCard()
self.roundStart()
self.creatorCardMsg = self.creator.getCardDescribe()
self.joinerCardMsg = self.joiner.getCardDescribe()
self.turnInfoMsg += self.creator.nowCard.onCardSet()
self.turnInfoMsg += self.joiner.nowCard.onCardSet()
self.turnInfoMsg += self.creator.runEffect("onCardSet")
self.turnInfoMsg += self.joiner.runEffect("onCardSet")
self.turnInfoMsg += f'-------------------------------------------------------\n'
def roundStart(self):
self.gameRound += 1
self.turnInfoMsg += f'-- 宣言回目 {self.gameRound} --\n'
self.turnInfoMsg += f'{self.creator.name} 当前血量:{self.creator.nowHp}\n'
self.turnInfoMsg += f'{self.joiner.name} 当前血量:{self.joiner.nowHp}\n'
def turnStart(self):
self.turnInfoMsg += self.creator.nowCard.onTurnStart()
self.turnInfoMsg += self.joiner.nowCard.onTurnStart()
self.turnInfoMsg += self.creator.runEffect('onTurnStart')
self.turnInfoMsg += self.joiner.runEffect('onTurnStart')
def turnGetBasePoint(self):
self.cAtk, pointMsg1 = self.creator.getPoints()
self.jAtk, pointMsg2 = self.joiner.getPoints()
self.turnInfoMsg += (pointMsg1 + pointMsg2)
def turnHurtValueCalc(self):
self.cHurt, hurtMsg1 = self.creator.calcHurt(self.jAtk)
self.jHurt, hurtMsg2 = self.joiner.calcHurt(self.cAtk)
self.turnInfoMsg += (hurtMsg1 + hurtMsg2)
def turnHpChange(self):
self.turnInfoMsg += self.creator.battleHurt(self.cHurt)
self.turnInfoMsg += self.joiner.battleHurt(self.jHurt)
def turnEnd(self):
self.turnInfoMsg += self.creator.runEffect('onTurnEnd')
self.turnInfoMsg += self.joiner.runEffect('onTurnEnd')
self.turnInfoMsg += self.creator.nowCard.onTurnEnd()
self.turnInfoMsg += self.joiner.nowCard.onTurnEnd()
self.turnInfoMsg += f'-------------------------------------------------------\n'
self.cleanTurnTempData()
def cleanTurnTempData(self):
self.creator.cleanTurnTempData()
self.joiner.cleanTurnTempData()
self.cAtk, self.jAtk, self.cHurt, self.jHurt = 0, 0, 0, 0
def cardBreakJudge(self):
creatorBreak = self.creator.shouldChangeCard()
joinerBreak = self.joiner.shouldChangeCard()
if creatorBreak and joinerBreak:
self.turnInfoMsg += f'-------------------------------------------------------\n'
self.turnInfoMsg += f'{self.creator.name} 当前符卡被击破!\n'
self.turnInfoMsg += self.creator.runEffect("onCardBreak")
self.turnInfoMsg += self.joiner.runEffect("onEnemyCardBreak")
self.turnInfoMsg += self.creator.nowCard.onCardBreak()
self.turnInfoMsg += f'{self.joiner.name} 当前符卡被击破!\n'
self.turnInfoMsg += self.joiner.runEffect("onCardBreak")
self.turnInfoMsg += self.creator.runEffect("onEnemyCardBreak")
self.turnInfoMsg += self.joiner.nowCard.onCardBreak()
self.lastTurnInfoImg = self.getTurnInfoImg()
self.cleanTurnTempData()
time.sleep(4)
gameContinueA = self.creator.setNewMainCard()
gameContinueB = self.joiner.setNewMainCard()
if not gameContinueA or not gameContinueB:
return True, True
self.roundStart()
self.creatorCardMsg = self.creator.getCardDescribe()
self.joinerCardMsg = self.joiner.getCardDescribe()
self.turnInfoMsg += self.creator.nowCard.onCardSet()
self.turnInfoMsg += self.joiner.nowCard.onCardSet()
self.turnInfoMsg += self.creator.runEffect("onCardSet")
self.turnInfoMsg += self.joiner.runEffect("onCardSet")
self.turnInfoMsg += f'-------------------------------------------------------\n'
return True, False
elif creatorBreak:
self.turnInfoMsg += f'-------------------------------------------------------\n'
self.turnInfoMsg += f'{self.creator.name} 当前符卡被击破!\n'
self.turnInfoMsg += self.creator.runEffect("onCardBreak")
self.turnInfoMsg += self.joiner.runEffect("onEnemyCardBreak")
self.turnInfoMsg += self.creator.nowCard.onCardBreak()
self.lastTurnInfoImg = self.getTurnInfoImg()
self.cleanTurnTempData()
time.sleep(4)
gameContinue = self.creator.setNewMainCard()
if not gameContinue:
return True, True
self.roundStart()
self.creatorCardMsg = self.creator.getCardDescribe()
self.turnInfoMsg += self.creator.nowCard.onCardSet()
self.turnInfoMsg += self.creator.runEffect("onCardSet")
self.turnInfoMsg += f'-------------------------------------------------------\n'
return True, False
elif joinerBreak:
self.turnInfoMsg += f'-------------------------------------------------------\n'
self.turnInfoMsg += f'{self.joiner.name} 当前符卡被击破!\n'
self.turnInfoMsg += self.joiner.runEffect("onCardBreak")
self.turnInfoMsg += self.creator.runEffect("onEnemyCardBreak")
self.turnInfoMsg += self.joiner.nowCard.onCardBreak()
self.lastTurnInfoImg = self.getTurnInfoImg()
self.cleanTurnTempData()
time.sleep(4)
gameContinue = self.joiner.setNewMainCard()
if not gameContinue:
return True, True
self.roundStart()
self.joinerCardMsg = self.joiner.getCardDescribe()
self.turnInfoMsg += self.joiner.nowCard.onCardSet()
self.turnInfoMsg += self.joiner.runEffect("onCardSet")
self.turnInfoMsg += f'-------------------------------------------------------\n'
return True, False
return False, False
def getTurnInfoImg(self):
sizeBig = 25
sizeMid = 20
sizeSmall = 15
rowSpacing = 3
width = 900
margin = 20
font1 = ImageFont.truetype("HarmonyOS_Sans_SC_Bold", sizeBig)
font2 = ImageFont.truetype("HarmonyOS_Sans_SC_Regular", sizeMid)
font3 = ImageFont.truetype("HarmonyOS_Sans_SC_Light", sizeSmall)
baseHeight = sizeBig + sizeMid * 6 + rowSpacing * 6 + margin * 2
turnInfoMsgLineCount = self.turnInfoMsg.count('\n') + 1
turnInfoMsgHeight = (sizeSmall + rowSpacing) * turnInfoMsgLineCount + margin * 2
totalHeight = baseHeight + turnInfoMsgHeight
img = Image.new(mode="RGB", size=(width, totalHeight), color=(255, 255, 255))
draw = ImageDraw.Draw(img)
draw.text((margin, margin), self.creator.name, font=font1, fill=(96, 16, 16))
draw.text((margin, margin + sizeBig + rowSpacing), self.creatorCardMsg, font=font2, fill=(96, 16, 16))
draw.text((width / 2, margin), self.joiner.name, font=font1, fill=(16, 16, 96))
draw.text((width / 2, margin + sizeBig + rowSpacing), self.joinerCardMsg, font=font2, fill=(16, 16, 96))
draw.line(xy=(margin, baseHeight, width - margin, baseHeight), fill=(100, 100, 100), width=2)
draw.text((margin, baseHeight + margin), self.turnInfoMsg, font=font3, fill=(0, 0, 0))
img.save("temp.jpg", format="JPEG", quality=95)
self.turnInfoMsg = ""
return getImgBase64(r"temp.jpg")
def endGameCheck(self):
isEndGame = False
loserName = []
if self.creator.shouldEnd():
isEndGame = True
loserName.append(self.creator.name)
if self.joiner.shouldEnd():
isEndGame = True
loserName.append(self.joiner.name)
time.sleep(4)
return isEndGame, loserName
# Path: plugins/scBattle/scBattlerObj.py
class Battler:
def __init__(self, userId, userName) -> None:
self.id = userId
self.name = userName
self.cardIdList = [0, 0, 0, 0, 0]
self.nowCardOrder = 0
self.states = []
self.effects = []
self.nowHp = 0
self.nowCard = None
self.enemy = None
self.attack, self.defence, self.dodge = 0, 0, 0
self.dodSuccess, self.defSuccess = None, None
def setEnemy(self, enemy):
self.enemy = enemy
def battleHurt(self, value):
value, beforeHurtInfo = self.runEffect("beforeHurt", value)
self.nowHp -= value
_, commonHurtInfo = self.runEffect("onHurt", value)
_, battleHurtInfo = self.runEffect("onBattleHurt", value)
return beforeHurtInfo + commonHurtInfo + battleHurtInfo
def effectHurt(self, value):
value, beforeHurtInfo = self.runEffect("beforeHurt", value)
self.nowHp -= value
_, commonHurtInfo = self.runEffect("onHurt", value)
_, effectHurtInfo = self.runEffect("onEffectHurt", value)
return beforeHurtInfo + commonHurtInfo + effectHurtInfo
def heal(self, value):
value, healInfo = self.runEffect("onHealValueCalc", value)
self.nowHp += value
self.nowHp = min(self.nowHp, self.nowCard.cardHp)
healInfo += self.runEffect("onHeal")
return healInfo
def appendEffect(self, effectId, effectAmount):
if not effectId:
return
effectIdList = [effect.id for effect in self.effects] if self.effects else []
if effectId in effectIdList:
self.effects[effectIdList.index(effectId)].stackEffect(effectAmount)
else:
effect = utils.getEffectObjById(effectId, effectAmount)
effect.setPlayerInfo(self, self.enemy)
self.effects.append(effect)
def appendBorder(self, effectId, borderTurn, borderStrength):
if not effectId:
return
effectIdList = [effect.id for effect in self.effects] if self.effects else []
if effectId in effectIdList:
self.effects[effectIdList.index(effectId)].stackEffect(borderTurn)
else:
border = utils.getEffectObjById(effectId, borderTurn)
border.setPlayerInfo(self, self.enemy)
border.setBorderStrength(borderStrength)
self.effects.append(border)
def removeEffect(self, effectId, effectAmount=0):
if not effectId:
return
effectIdList = [effect.id for effect in self.effects] if self.effects else []
if effectId in effectIdList:
if effectAmount == 0:
self.effects.pop(effectIdList.index(effectId))
else:
self.effects[effectIdList.index(effectId)].reduceEffect(effectAmount)
self.removeEmptyEffect()
def removeEmptyEffect(self):
for effect in self.effects:
if effect.effectAmount == 0:
self.effects.remove(effect)
def runEffect(self, funcName, *args):
effectInfoMsgs = []
for effect in self.effects:
if "Froze" in self.states:
if effect.id != "Freeze" and not isinstance(effect, AbstractBorder):
continue
func = getattr(effect, funcName)
args = func(*args)
args = () if args is None else args
args = (args, ) if not isinstance(args, tuple) else args
if effect.effectInfoMsg:
effectInfoMsgs.append(effect.effectInfoMsg)
effect.effectInfoMsg = ""
effectInfoMsgs = "\n".join(effectInfoMsgs)
self.removeEmptyEffect()
if len(args) == 0:
return effectInfoMsgs
if len(args) == 1:
return args[0], effectInfoMsgs
        # when more than one value is returned, the caller receives them as a tuple
return args, effectInfoMsgs
def getPoints(self):
self.attack = utils.runDiceByString(self.nowCard.atkPoint)
self.attack, atkInfo = self.runEffect("onAttackCalc", self.attack)
self.defence = utils.runDiceByString(self.nowCard.defPoint)
self.defence, defInfo = self.runEffect("onDefenceCalc", self.defence)
self.dodge = utils.runDiceByString(self.nowCard.dodPoint)
self.dodge, dodInfo = self.runEffect("onDodgeCalc", self.dodge)
self.attack, self.defence, self.dodge = max(self.attack, 0), max(self.defence, 0), max(self.dodge, 0)
pointInfo = atkInfo + defInfo + dodInfo + f'{self.name} Hp:{self.nowHp} Atk:{self.attack} Def:{self.defence} Dod:{self.dodge}\n'
return self.attack, pointInfo
def calcHurt(self, enemyAtk):
dodSuccess = True if self.dodge >= enemyAtk else False
dodSuccess, dodInfo = self.runEffect('onDodgeSuccessJudge', dodSuccess)
dodSuccess, enemyDodInfo = self.enemy.runEffect('onEnemyDodgeSuccessJudge', dodSuccess)
defSuccess = True
defSuccess, defInfo = self.runEffect('onDefenceSuccessJudge', defSuccess)
defSuccess, enemyDefInfo = self.enemy.runEffect('onEnemyDefenceSuccessJudge', defSuccess)
self.dodSuccess, self.defSuccess = dodSuccess, defSuccess
hurtValue = 0 if dodSuccess else max(enemyAtk - self.defence, 1) if defSuccess else enemyAtk
hurtValue, hurtInfo = self.runEffect('onHurtValueCalc', hurtValue)
hurtValue, enemyHurtInfo = self.enemy.runEffect('onEnemyHurtValueCalc', hurtValue)
hurtValue = 0 if hurtValue < 0 else hurtValue
calcInfo = dodInfo + enemyDodInfo + defInfo + enemyDefInfo + hurtInfo + enemyHurtInfo + f'{self.name} 预计受伤:{hurtValue} \n'
return hurtValue, calcInfo
def getCardDescribe(self):
return self.nowCard.getCardDescribe(self.nowCardOrder)
def cleanTurnTempData(self):
self.attack, self.defence, self.dodge = 0, 0, 0
self.dodSuccess, self.defSuccess = None, None
def shouldChangeCard(self):
return self.nowHp <= 0
def shouldEnd(self):
return self.nowCardOrder > 5
def setNewMainCard(self):
self.nowCardOrder += 1
if self.nowCardOrder > 5:
return False
nowCardId = self.cardIdList[self.nowCardOrder - 1]
self.nowCard = utils.getCardObjById(nowCardId)
self.nowCard.setPlayerInfo(self, self.enemy)
self.nowHp = self.nowCard.cardHp
print(self.nowCard)
return True
# Path: plugins/spellcard_battle.py
from plugins.scBattle.scBattleObj import Battle
from plugins.scBattle.scBattlerObj import Battler
from nonebot import on_command, CommandSession
import plugins.scBattle.scBattleUtils as utils
import dbConnection.kusa_item as itemDB
import re
import string
import codecs
import nonebot
bot = nonebot.get_bot()
battleList = {}
def inBattle(qq) -> Battle or None:
for battle in battleList.values():
if battle.creatorId == qq or battle.joinerId == qq:
return battle
return None
def waitingBattleQQList() -> list:
waitingList = []
for battle in battleList.values():
| if not battle.joinerId: |
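For readability, a plausible completion of waitingBattleQQList after the predicted line; this continuation is an assumption and is not quoted from the repository.

def waitingBattleQQList() -> list:
    waitingList = []
    for battle in battleList.values():
        if not battle.joinerId:
            # no joiner yet, so the creator is still waiting for an opponent
            waitingList.append(battle.creatorId)
    return waitingList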
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ilur98/DGQ
# Path: dgq/quant/quant_sequence.py
@torch.no_grad()
def PTQ(model, enc,
qconfig,
nsamples=128, seqlen=2048):
dev = "cuda:0"
layers = get_blocks(model)
layer_kwargs = {}
cache={'i': 0}
layers[0] = layers[0].cuda()
move_embed(model, dev)
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros((nsamples, seqlen, model.config.hidden_size), dtype=dtype, device=dev)
outs = torch.zeros_like(inps)
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
layer_kwargs.update(kwargs)
raise ValueError
layers[0] = Catcher(layers[0])
for batch in enc:
try:
model(batch[0].to(dev))
except ValueError:
pass
del enc
layers[0] = layers[0].module # restore
# inps = inps[0]
layers[0] = layers[0].cpu()
move_embed(model, "cpu")
for i in range(len(layers)):
print(i)
layer = layers[i].to(dev)
full = find_layers(layer, [QuantLinear])
sequential = [list(full.keys())]
set_quant_state(layer, False, False)
prepare_hook(layer, inps, qconfig, layer_kwargs)
if qconfig["meanact"]:
mean_bias(layer)
if qconfig["smoothquant"]:
smooth_module(layer)
if qconfig["kvquant"]:
kvquant(layer)
for names in sequential:
subset = {n: full[n] for n in names}
helpers = {}
for name in subset:
helpers[name] = QuantizerHelper(subset[name])
helpers[name].quantizer.configure(qconfig["wt_quant"]["bits"], perchannel=True, sym=False, mse=False)
def add_batch(name):
def tmp(_, inp, out):
helpers[name].add_batch(inp[0].data, out.data)
return tmp
handles = []
for name in subset:
handles.append(subset[name].register_forward_hook(add_batch(name)))
for j in range(nsamples):
outs[j] = layer(inps[j].unsqueeze(0), **layer_kwargs)[0]
for h in handles:
h.remove()
for name in subset:
if qconfig["wt_quant"]["method"] == "gptq":
scale, zero = helpers[name].gptqquant(percdamp=qconfig["percdamp"], groupsize=qconfig["wt_quant"]["groupsize"], actorder=qconfig["act_order"], name=name)
elif qconfig["wt_quant"]["method"] == "search":
scale, zero, scale8 = helpers[name].searchquant(groupsize=qconfig["wt_quant"]["groupsize"], W4W8=qconfig["wt_quant"]["w4w8"])
elif qconfig["wt_quant"]["method"] == "naive":
scale, zero = helpers[name].naivequant(groupsize=qconfig["wt_quant"]["groupsize"])
else:
raise NotImplemented
if qconfig["wt_quant"]["w4w8"]:
subset[name].packW4W8(scale, zero, scale8)
else:
subset[name].pack(scale, zero)
if qconfig["act_quant"] is not None:
clamp = subset[name].inp_absmax.max()
subset[name].amax = clamp
delattr(subset[name], "inp_absmax")
subset[name].prepare_actfun()
helpers[name].free()
set_quant_state(layer, qconfig['act_quant'] != None, qconfig['wt_quant'] != None)
for j in range(nsamples):
outs[j] = layer(inps[j].unsqueeze(0), **layer_kwargs)[0]
layers[i] = layer.cpu()
del layer
# del helpers
torch.cuda.empty_cache()
inps, outs = outs, inps
# Path: dgq/utils/datautils.py
def get_loaders(name, nsamples=128, seed=0, seqlen=2048, model=''):
if 'wikitext2' in name:
return get_wikitext2(nsamples, seed, seqlen, model)
if 'ptb' in name:
if 'new' in name:
return get_ptb_new(nsamples, seed, seqlen, model)
return get_ptb(nsamples, seed, seqlen, model)
if 'c4' in name:
if 'new' in name:
return get_c4_new(nsamples, seed, seqlen, model)
return get_c4(nsamples, seed, seqlen, model)
# Path: dgq/utils/datautils.py
def prepare_mmlu(model, mmlu_dataset):
from transformers import AutoTokenizer
try:
tokenizer = AutoTokenizer.from_pretrained(model, use_fast=False)
except:
tokenizer = AutoTokenizer.from_pretrained(model, use_fast=True)
if 'llama' in model.lower():
tokenizer.eos_token = "</s>"
        tokenizer.eos_token_id = 2 # LLaMA eos-token-id
# add pad token if not present
tokenizer.pad_token = tokenizer.eos_token
tokenizer.pad_token_id = tokenizer.eos_token_id
if mmlu_dataset == 'mmlu-zs':
mmlu_dataset = load_dataset("json", data_files={
'eval': 'data/mmlu/zero_shot_mmlu_val.json',
'test': 'data/mmlu/zero_shot_mmlu_test.json',
})
mmlu_dataset = mmlu_dataset.remove_columns('subject')
# MMLU Five-shot (Eval/Test only)
elif mmlu_dataset == 'mmlu' or mmlu_dataset == 'mmlu-fs':
mmlu_dataset = load_dataset("json", data_files={
'eval': 'data/mmlu/five_shot_mmlu_val.json',
'test': 'data/mmlu/five_shot_mmlu_test.json',
})
# mmlu_dataset = mmlu_dataset[mmlu_split]
# mmlu_dataset = mmlu_dataset.select(range(nsamples))
abcd_idx = [
tokenizer("A", add_special_tokens=False).input_ids[0],
tokenizer("B", add_special_tokens=False).input_ids[0],
tokenizer("C", add_special_tokens=False).input_ids[0],
tokenizer("D", add_special_tokens=False).input_ids[0],
]
data_collator = DataCollatorForCausalLM(
tokenizer=tokenizer,
source_max_len=2048,
target_max_len=512,
train_on_source=False,
predict_with_generate=False
)
mmlu_dataloader_test = DataLoader(mmlu_dataset['test'],
collate_fn=data_collator,
sampler=SequentialSampler(mmlu_dataset['test']),
batch_size=2)
return mmlu_dataset['test'], mmlu_dataloader_test, abcd_idx
# Path: dgq/utils/evalutils.py
@torch.no_grad()
def model_eval(model, testenc, dev, local_args=None):
testenc = testenc.input_ids
nsamples = testenc.numel() // model.seqlen
# model = model.to(dev)
model.eval()
model.config.use_cache = False
# testenc = testenc.to(dev)
layers = get_blocks(model)
layer_kwargs = {}
cache={'i': 0}
layers[0] = layers[0].to(dev)
move_embed(model, dev)
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros((nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev)
torch.cuda.memory_summary()
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
layer_kwargs.update(kwargs)
raise ValueError
layers[0] = Catcher(layers[0])
for i in range(nsamples):
batch = testenc[:, (i * model.seqlen):((i + 1) * model.seqlen)].to(dev)
try:
model(batch)
except ValueError:
pass
layers[0] = layers[0].module # restore
layers[0] = layers[0].cpu()
move_embed(model, "cpu")
outs = torch.zeros_like(inps)
torch.cuda.empty_cache()
for i in range(len(layers)):
print(i)
layer = layers[i].to(dev)
for j in range(nsamples):
outs[j] = layer(inps[j].unsqueeze(0), **layer_kwargs)[0]
layers[i] = layer.cpu()
del layer
torch.cuda.empty_cache()
inps, outs = outs, inps
mod_list = move_norm_head(model, dev)
testenc = testenc.to(dev)
nlls = []
for i in range(nsamples):
hidden_states = inps[i].unsqueeze(0)
for mod in mod_list:
hidden_states = mod(hidden_states)
lm_logits = model.lm_head(hidden_states)
shift_logits = lm_logits[:, :-1, :].contiguous()
shift_labels = testenc[:, (i * model.seqlen):((i + 1) * model.seqlen)][:, 1:]
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
neg_log_likelihood = loss.float() * model.seqlen
nlls.append(neg_log_likelihood)
ppl = torch.exp(torch.stack(nlls).sum() / (nsamples * model.seqlen))
print(ppl.item())
# Path: dgq/utils/evalutils.py
@torch.no_grad()
def total_model_eval(model, testenc, dev, local_args=None):
# testenc = testenc.cpu()
testenc = testenc.input_ids
nsamples = testenc.numel() // model.seqlen
model = model.to(dev)
model.eval()
model.config.use_cache = False
torch.cuda.memory_summary()
model = model.to(dev)
nlls = []
for i in range(nsamples):
print(i)
batch = testenc[:, (i * model.seqlen):((i + 1) * model.seqlen)].to(dev)
out = model(batch)['logits']
shift_logits = out[:, :-1, :].contiguous()
shift_labels = testenc[:, (i * model.seqlen):((i + 1) * model.seqlen)][:, 1:].cuda()
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)).cpu()
neg_log_likelihood = loss.float() * model.seqlen
nlls.append(neg_log_likelihood)
torch.cuda.empty_cache()
ppl = torch.exp(torch.stack(nlls).sum() / (nsamples * model.seqlen))
print(ppl.item())
# Path: dgq/utils/evalutils.py
def mmlu_eval(model, mmlu_dataset, data_loader, abcd_idx, dev, local_args=None):
abcd_idx = abcd_idx
model.eval()
preds, refs = [], []
loss_mmlu = 0
cnt = 0
for batch in tqdm(data_loader, total=len(data_loader)):
cnt += 1
batch = to_device(batch, model.device)
with torch.no_grad():
outputs = model(**batch)
loss = outputs.loss
logits = outputs.logits
labels = batch['labels']
# There are two tokens, the output, and eos token.
for i, logit in enumerate(logits):
label_non_zero_id = (batch['labels'][i] != -100).nonzero()[0][0]
logit_abcd = logit[label_non_zero_id-1][abcd_idx]
preds.append(torch.argmax(logit_abcd).item())
labels = labels[labels != IGNORE_INDEX].view(-1, 2)[:,0]
refs += [abcd_idx.index(label) for label in labels.tolist()]
loss_mmlu += loss.item()
# Extract results by subject.
results = {'mmlu_loss':loss_mmlu/len(data_loader)}
subject = mmlu_dataset['subject']
subjects = {s:{'refs':[], 'preds':[]} for s in set(subject)}
for s,p,r in zip(subject, preds, refs):
subjects[s]['preds'].append(p)
subjects[s]['refs'].append(r)
subject_scores = []
for subject in subjects:
nn = len(subjects[subject]['refs'])
subject_score = 0 if nn==0 else sum([subjects[subject]['refs'][ii] == subjects[subject]['preds'][ii] for ii in range(nn)])/nn
results[f'accuracy_{subject}'] = subject_score
subject_scores.append(subject_score)
results[f'accuracy'] = np.mean(subject_scores)
return results
# Path: dgq/utils/loadutils.py
def load_quant(model, checkpoint):
if checkpoint.endswith('.safetensors'):
from safetensors.torch import load_file as safe_load
state_dict = model.state_dict()
ckt = safe_load(checkpoint)
for key in ckt.keys():
try:
state_dict[key].copy_(ckt[key])
except Exception as e:
print(key)
print(e)
pars = key.split('.')
att = pars[-1]
modname = '.'.join(pars[1:-1])
for name,mod in model.named_modules():
if modname in name:
delattr(mod,att)
mod.register_buffer(att, ckt[key])
# model.load_state_dict(ckt)
else:
model.load_state_dict(torch.load(checkpoint))
for sublayer in model.modules():
if isinstance(sublayer,QuantLinear):
sublayer.prepare_actfun()
delattr(sublayer, "weight")
model.seqlen = 2048
print('Done.')
return model
# Path: dgq/utils/loadutils.py
def inference_model(model):
if isinstance(model, OPTForCausalLM):
decoder_layer_scales = []
for layer in model.model.decoder.layers:
decoder_layer_scale = {"attn_input_scale": layer.self_attn.q_proj.amax.float() / (2 ** 7 - 1),
"q_output_scale": layer.self_attn.q_quant.scale.float(),
"k_output_scale": layer.self_attn.k_quant.scale.float(),
"v_output_scale": layer.self_attn.v_quant.scale.float(),
"out_input_scale": layer.self_attn.out_proj.amax.float() / (2 ** 7 - 1),
"fc1_input_scale": layer.fc1.amax.float() / (2 ** 7 - 1),
"fc2_input_scale": layer.fc2.amax.float() / (2 ** 7 - 1)}
decoder_layer_scales.append(decoder_layer_scale)
seqlen = model.seqlen
model = A8W4OPTForCausalLM.from_float(model, decoder_layer_scales)
model.seqlen = seqlen
elif isinstance(model, LlamaForCausalLM):
decoder_layer_scales = []
for layer in model.model.layers:
decoder_layer_scale = {"attn_input_scale": layer.self_attn.q_proj.amax.float() / (2 ** 7 - 1),
"q_output_scale": layer.self_attn.q_quant.scale.float(),
"k_output_scale": layer.self_attn.k_quant.scale.float(),
"v_output_scale": layer.self_attn.v_quant.scale.float(),
"out_input_scale": layer.self_attn.o_proj.amax.float() / (2 ** 7 - 1),
"mlp_input_scale": layer.mlp.up_proj.amax.float() / (2 ** 7 - 1),
"down_input_scale": layer.mlp.down_proj.amax.float() / (2 ** 7 - 1)}
decoder_layer_scales.append(decoder_layer_scale)
seqlen = model.seqlen
model = A8W4LlamaForCausalLM.from_float(model, decoder_layer_scales)
model.seqlen = seqlen
else:
raise NotImplementedError
return model
# Path: dgq/utils/modelutils.py
def convert_model(module, qconfig):
if isinstance(module, QuantLinear):
return
for name, mod in module.named_children():
if isinstance(mod, nn.Linear) and not name.endswith("head"):
newlayer = QuantLinear(mod.in_features, mod.out_features, hasattr(mod, "bias"), qconfig)
newlayer.weight = mod.weight
if hasattr(mod, "bias"):
newlayer.bias = mod.bias
setattr(module, name, newlayer)
elif isinstance(mod, OPTAttention):
OPTAttention_QKVQuant(mod, qconfig)
elif isinstance(mod, BloomAttention):
BLOOMAttention_QKVQuant(mod, qconfig)
elif isinstance(mod, LlamaAttention):
LlamaAttention_QKVQuant(mod, qconfig)
convert_model(mod, qconfig)
# Path: dgq/entry.py
import argparse
import numpy as np
import torch
import torch.nn as nn
import time
import lm_eval
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
from texttable import Texttable
from dgq.quant.quant_sequence import PTQ
from dgq.utils.datautils import get_loaders, prepare_mmlu
from dgq.utils.evalutils import model_eval, total_model_eval, mmlu_eval
from dgq.utils.loadutils import load_quant, inference_model
from dgq.utils.modelutils import convert_model
from safetensors.torch import save_file as safe_save
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str, help='llama model to load')
parser.add_argument('dataset', type=str, choices=['wikitext2', 'ptb', 'c4'], help='Where to extract calibration data from.')
parser.add_argument('--nsamples', type=int, default=18, help='Number of calibration data samples.')
parser.add_argument('--seed', type=int, default=0, help='Seed for sampling the calibration data.')
parser.add_argument('--wbits', type=int, default=4, choices=[2, 3, 4, 8, 16], help='#bits to use for weight quantization; use 16 for evaluating base model.')
parser.add_argument('--abits', type=int, default=8, choices=[8, 16], help='#bits to use for activation quantization; use 16 for evaluating base model.')
parser.add_argument('--percdamp', type=float, default=.01, help='Percent of the average Hessian diagonal to use for dampening.')
parser.add_argument('--save', type=str, default='', help='Save quantized checkpoint under this name.')
parser.add_argument('--save_safetensors', type=str, default='', help='Save quantized `.safetensors` checkpoint under this name.')
parser.add_argument('--load', type=str, default='', help='Load quantized model.')
parser.add_argument('--benchmark', type=int, default=0, help='Number of tokens to use for benchmarking.')
parser.add_argument('--check', action='store_true', help='Whether to compute perplexity during benchmarking for verification.')
| parser.add_argument('--groupsize', type=int, default=-1, help='Groupsize to use for quantization; default uses full row.') |
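The options collected here feed PTQ, which (per quant_sequence.py above) indexes qconfig with keys such as wt_quant, act_quant, percdamp, act_order, meanact, smoothquant and kvquant. A hedged sketch of how the parsed args might be folded into that dict; the exact mapping and the chosen method are assumptions for illustration only.

args = parser.parse_args()
qconfig = {
    "wt_quant": {
        "bits": args.wbits,
        "method": "search",          # PTQ above also accepts "gptq" and "naive"
        "groupsize": args.groupsize,
        "w4w8": False,
    },
    "act_quant": {"bits": args.abits} if args.abits < 16 else None,
    "percdamp": args.percdamp,
    "act_order": False,
    "meanact": False,
    "smoothquant": False,
    "kvquant": False,
}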
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: noco-ai/elemental-golem
# Path: application/download.py
def install_skill(all_skills, install_skill_data, shared_models, server_id, channel):
# Create a list to hold all the processes
processes = []
for skill in all_skills:
if skill["routing_key"] != install_skill_data["routing_key"]:
continue
if "model" in skill:
for model in skill["model"]:
process = multiprocessing.Process(target=download_model, args=(model, install_skill_data, shared_models, server_id, channel))
processes.append(process)
process.start()
if "repository" in skill:
for repo in skill["repository"]:
# Create and start a new process for each download
process = multiprocessing.Process(target=download_repo, args=(repo["url"], repo["folder"], repo["module_path"]))
processes.append(process)
process.start()
# Path: application/system_info.py
def get_system_info(server_id, gpu_type):
# network info
hostname = socket.gethostname()
system_info = {
"server_id": server_id,
"hostname": hostname
}
# RAM information
mem_info = psutil.virtual_memory()
system_info["ram"] = {
"total": mem_info.total,
"available": mem_info.available,
"used": mem_info.used,
"percent_used": mem_info.percent
}
# CPU information
system_info["cpu"] = {
"count": psutil.cpu_count(),
"percent_used": psutil.cpu_percent()
}
# Hard drive information
disk_usage = psutil.disk_usage(os.path.abspath(os.sep))
system_info["hd"] = {
"total": disk_usage.total,
"used": disk_usage.used,
"free": disk_usage.free,
"percent_used": disk_usage.percent
}
system_info["gpu"] = []
gpu_names = {}
# NVIDIA GPU information
if gpu_type == "nvidia":
nvmlInit()
device_count = nvmlDeviceGetCount()
for i in range(device_count):
handle = nvmlDeviceGetHandleByIndex(i)
name = nvmlDeviceGetName(handle)
mem_info = nvmlDeviceGetMemoryInfo(handle)
utilization = nvmlDeviceGetUtilizationRates(handle)
# rename gpu if we have more than one of the same type
if name in gpu_names:
gpu_name = f"{name} #{gpu_names[name]}"
gpu_names[name] += 1
else:
gpu_name = name
gpu_names[name] = 2
system_info["gpu"].append({
"device": f"cuda:{i}",
"name": gpu_name,
"memory_total": mem_info.total,
"memory_used": mem_info.used,
"memory_free": mem_info.free,
"gpu_utilization": utilization.gpu,
"memory_utilization": utilization.memory
})
nvmlShutdown()
# rename multiple gpus
for gpu in system_info["gpu"]:
if gpu["name"] in gpu_names and gpu_names[gpu["name"]] > 2:
gpu["name"] = f"{gpu['name']} #1"
return system_info
# Path: application/system_info.py
def load_configs(base_dir, vault_client, vault_root, server_id, gpu_type):
# Return data
all_skills = []
all_configs = {}
all_models = []
all_repos = []
script_map = {}
loaded_handlers = []
# load custom skills
custom_skill_map = {}
custom_skills = []
try:
filename = f"data/{server_id}_custom.json"
with open(filename, 'r') as file:
custom_skills = json.load(file)
except FileNotFoundError:
pass
for custom_skill in custom_skills:
golem_module_path = f"modules/{custom_skill['golem_module']}"
if golem_module_path not in custom_skill_map:
custom_skill_map[golem_module_path] = []
custom_skill_map[golem_module_path].append(custom_skill)
# Walk through the directory
for dir_path, dir_names, file_names in os.walk(base_dir):
# Check each file in the current directory
for file_name in file_names:
# If the file is not a golem.json file
if file_name != "golem.json":
continue
# Construct the full path to the file
full_path = os.path.join(dir_path, file_name)
# Open the file and load the JSON
with open(full_path, 'r') as f:
config = json.load(f)
# Save the loaded config to the dictionary
script_path = os.path.join(dir_path, config["script"])
config["script_path"] = script_path
all_configs[dir_path] = config
if "supported_gpu" in config and gpu_type not in config["supported_gpu"]:
logger.info(f"skipping handler {config['label']}, gpu not supported")
continue
if "repository" in config:
for repo in config["repository"]:
all_repos.append(repo["folder"])
# If the "skills" key exists in the JSON, append its contents to the all_models array
if "skills" in config:
if dir_path in custom_skill_map:
config["skills"].extend(custom_skill_map[dir_path])
loaded_handlers.append({
"unique_key": config.get("unique_key", ""),
"label": config.get("label", ""),
"description": config.get("description", "")
})
global_repos = config.get("repository", [])
global_configuration = config.get("configuration", {}) # Get the global configuration
global_config_dict = {option["name"]: option["default"] for option in global_configuration.get("options", [])}
vault_path = global_configuration.get("vault_path", "")
for skill in config["skills"]:
vault_data = {}
if vault_path:
try:
config_path = f'{vault_root}/data/{vault_path}/{skill["routing_key"]}'
vault_data_resp = vault_client.read(path=config_path)
vault_data = {} if vault_data_resp == None else vault_data_resp['data']['data']
except Exception as e:
pass # no need to log just means no override data has been set
module_name = dir_path.split("modules/")[1]
skill["golem_module"] = module_name
skill["raw"] = json.dumps(skill, indent=2)
skill["handler_key"] = config.get("unique_key", "")
skill_configuration = skill.get("configuration", {})
merged_config = {**global_config_dict, **skill_configuration, **vault_data} # Merge global, skill level and vault configurations
skill["configuration"] = merged_config # Replace the skill configuration with the merged configuration
skill["configuration_template"] = global_configuration.copy()
skill["repository"] = global_repos.copy()
for repo in skill["repository"]:
repo["module_path"] = dir_path
skill["secrets"] = {}
if "vault_path" in skill["configuration_template"]:
skill["configuration_template"]["vault_path"] = skill["configuration_template"]["vault_path"] + "/" + skill["routing_key"]
skill["multi_gpu_support"] = True if "multi_gpu_support" in config and config["multi_gpu_support"] == True else False
# protect sensitive data
if "options" in skill["configuration_template"]:
for option in skill["configuration_template"]["options"]:
if option["type"] == "secret":
skill["secrets"][option["name"]] = merged_config[option["name"]]
merged_config[option["name"]] = "SECRET"
all_skills.append(skill)
script_map[skill["routing_key"]] = script_path
if "model" not in skill:
continue
for model in skill["model"]:
if "files" in model:
for file in model["files"]:
model_full_path = os.path.join(model["name"], model["files"][file])
lock_file = hashlib.sha256(model_full_path.encode()).hexdigest()[:10] + ".lock"
all_models.append({"path": model_full_path, "lock_file": lock_file })
if "branch" in model:
for file in model["branch"]:
model_full_path = os.path.join(model["name"], model["branch"][file])
lock_file = hashlib.sha256(model_full_path.encode()).hexdigest()[:10] + ".lock"
all_models.append({"path": model_full_path, "lock_file": lock_file })
else:
model_full_path = model["name"]
lock_file = hashlib.sha256(model_full_path.encode()).hexdigest()[:10] + ".lock"
all_models.append({"path": model_full_path, "lock_file": lock_file })
return all_skills, all_configs, all_models, all_repos, script_map, loaded_handlers
# Path: application/system_info.py
def load_enabled_skills(server_id: str) -> dict:
# Check if the file exists
if not os.path.exists(f'data/{server_id}_skills.json'):
logger.info(f"file data/{server_id}_skills.json does not exist")
return {}
try:
with open(f'data/{server_id}_skills.json', 'r') as f:
enabled_skills = json.load(f)
except json.JSONDecodeError:
logger.info(f"invalid json in data/{server_id}_skills.json")
return {}
# Prepare an empty dictionary to hold valid skills
enabled_skills_dict = {}
# Define the expected keys and their data types
expected_keys = {"routing_key": str, "device": str, "use_precision": str}
for item in enabled_skills:
# Check if item contains all expected keys, their values are of the correct data types,
# and no additional keys are present
if (set(item.keys()) == set(expected_keys.keys()) and
all(isinstance(item[key], expected_keys[key]) for key in expected_keys)):
if item['routing_key'] not in enabled_skills_dict:
enabled_skills_dict[item['routing_key']] = []
enabled_skills_dict[item['routing_key']].extend([item])
else:
logger.error(f"tnvalid skill data: {item}")
return enabled_skills_dict
# Path: application/amqp.py
def connect_to_amqp(amqp_ip, amqp_user, amqp_password, amqp_vhost):
# Otherwise, establish a new connection for this process
connection_successful = True
try:
credentials = pika.PlainCredentials(amqp_user, amqp_password)
connection = pika.BlockingConnection(
pika.ConnectionParameters(
host=amqp_ip,
virtual_host=amqp_vhost,
credentials=credentials,
connection_attempts=5,
retry_delay=5,
socket_timeout=600
)
)
channel = connection.channel()
except Exception as e:
connection_successful = False
logger.error(f"failed to connect", e)
return connection_successful, connection, channel
# Path: application/amqp.py
def become_consumer(channel, queue_name, callback_function):
channel.basic_consume(queue=queue_name, on_message_callback=callback_function, auto_ack=False)
channel.start_consuming()
# Path: application/amqp.py
def bind_queue_to_exchange(channel, queue_name, exchange_name, routing_key=None):
channel.queue_bind(exchange=exchange_name, queue=queue_name, routing_key=routing_key)
# Path: application/amqp.py
def create_exchange(channel, exchange_name, exchange_type='direct'):
channel.exchange_declare(exchange=exchange_name, exchange_type=exchange_type)
# Path: application/amqp.py
def create_queue(channel, queue_name, dlx=None, dlx_queue='deadletters', is_exclusive=False, is_auto_delete=False):
# Declare the queue with 'dlx' as the DLX if provided
if dlx:
result = channel.queue_declare(queue=queue_name, exclusive=is_exclusive, auto_delete=is_auto_delete, arguments={
'x-dead-letter-exchange': dlx,
'x-dead-letter-routing-key': dlx_queue
})
else:
result = channel.queue_declare(queue=queue_name, exclusive=is_exclusive, auto_delete=is_auto_delete)
return result.method.queue
# Path: application/amqp.py
def send_message_to_exchange(channel, exchange_name, routing_key, message, headers=None):
properties = pika.BasicProperties(delivery_mode=2) # make message persistent
if headers is not None:
properties.headers = headers
channel.basic_publish(exchange=exchange_name,
routing_key=routing_key,
body=message,
properties=properties)
# Path: application/thread.py
def start_worker_threads(all_skills, skills_config, amqp_params, script_map, server_id):
# Iterate through pipelines in the config
for skill in all_skills:
routing_key = skill["routing_key"]
# Skip the pipeline if the name is not found in the devices_and_status_dict
device_and_status = skills_config.get(routing_key)
if device_and_status is None:
continue
for to_device in device_and_status:
# Create a new process for each consumer
stop_generation_event = multiprocessing.Event()
stop_generation_filter = multiprocessing.Array(ctypes.c_char, 128)
stop_event = multiprocessing.Event()
thread_status = multiprocessing.Array(ctypes.c_char, 24)
config_event = multiprocessing.Event()
thread_config = multiprocessing.Array(ctypes.c_char, 4096)
thread_status.raw = bytes("STARTING", "utf-8")
process = multiprocessing.Process(target=worker_thread, args=(amqp_params, stop_event, stop_generation_event, stop_generation_filter,
thread_status, config_event, thread_config, to_device, skill, script_map, server_id))
process.start()
device = to_device["device"]
ram = skill["memory_usage"][to_device["use_precision"]]
worker_threads.extend([{ "process": process,
"routing_key": routing_key, "device": device, "ram":
ram, "use_precision": to_device["use_precision"], "stop_event": stop_event, "stop_generation_event": stop_generation_event,
"stop_generation_filter": stop_generation_filter, "thread_status": thread_status, "config_event": config_event, "thread_config": thread_config}])
# Path: application/thread.py
def stop_worker_thread(skill_details, amqp_channel):
for i, thread in enumerate(worker_threads):
if thread["routing_key"] == skill_details["routing_key"] and thread["device"] == skill_details["device"] and thread["use_precision"] == skill_details["use_precision"]:
logger.info(f"stopping thread for {skill_details['routing_key']}")
thread["thread_status"].raw = bytes('\0' * 24, 'utf-8')
thread["thread_status"].raw = bytes("STOPPING", "utf-8")
thread["stop_event"].set()
send_message_to_exchange(amqp_channel, "golem_skill", skill_details["routing_key"], "STOP", None)
while True:
thread_string = bytes(thread["thread_status"].raw).rstrip(b'\x00').decode("utf-8")
if thread_string == "STOPPED":
break
thread["process"].join()
del worker_threads[i]
return
# Path: application/thread.py
def get_worker_threads():
return worker_threads
# Path: application/thread.py
def stop_all_threads(amqp_channel):
for i, thread in enumerate(worker_threads):
logger.info(f"stopping thread for {thread['routing_key']}")
thread["thread_status"].raw = bytes('\0' * 24, 'utf-8')
thread["thread_status"].raw = bytes("STOPPING", "utf-8")
thread["stop_event"].set()
send_message_to_exchange(amqp_channel, "golem_skill", thread["routing_key"], "STOP", None)
while True:
time.sleep(2)
thread_string = bytes(thread["thread_status"].raw).rstrip(b'\x00').decode("utf-8")
if thread_string == "STOPPED":
break
thread["process"].join()
del worker_threads[i]
# Path: application/thread.py
def update_thread_configuration(vault_root, vault_client, vault_path):
config_path = f'{vault_root}/data/{vault_path}'
logger.info(f"updating thread configuration for {config_path}")
vault_data_resp = vault_client.read(path=config_path)
vault_data = {} if vault_data_resp == None else vault_data_resp['data']['data']
path_parts = vault_path.split('/')
unique_key = path_parts[-1]
json_dump = json.dumps(vault_data)
if len(json_dump) >= 4096:
#logger.error(f"error: configuraation json longer than buffer")
return {}
for thread in worker_threads:
if thread["routing_key"] != unique_key:
continue
thread["thread_config"].raw = bytes('\0' * 4096, 'utf-8')
thread["thread_config"].raw = bytes(json_dump, "utf-8")
thread["config_event"].set()
return vault_data
# Path: application/thread.py
def stop_thread_generation(stop_details):
for i, thread in enumerate(worker_threads):
if thread["routing_key"] == stop_details["routing_key"]:
logger.info(f"sending stop generation to {stop_details['routing_key']}")
thread["stop_generation_filter"].raw = bytes('\0' * 128, 'utf-8')
thread["stop_generation_filter"].raw = bytes(stop_details["socket_id"], "utf-8")
thread["stop_generation_event"].set()
return
# Path: server.py
import logging
import argparse
import time
import os
import json
import hashlib
import hvac
from typing import Dict
from application.download import install_skill
from application.system_info import get_system_info, load_configs, load_enabled_skills
from application.amqp import connect_to_amqp, become_consumer, bind_queue_to_exchange
from application.amqp import create_exchange, create_queue, send_message_to_exchange
from application.thread import start_worker_threads, stop_worker_thread, get_worker_threads, stop_all_threads, update_thread_configuration, stop_thread_generation
time.sleep(sleep_time)
logger.info(f"retrying connection to vault server. attempt {retry+1}/{max_retries}")
# If connection is not successful after max_retries
fatal_error('unable to connect to vault server after multiple attempts.')
if __name__ == "__main__":
logger.info("starting elemental golem")
# Parse command-line arguments
parser = argparse.ArgumentParser(description='Vault creds')
parser.add_argument('--server-id', required=True, help='Unique server ID')
parser.add_argument('--vault-host', required=True, help='Vault server host address')
parser.add_argument('--vault-token-file', help='Path to the Vault token file', default='./vault-token')
parser.add_argument('--vault-root', help='Root path in the Vault server', default='spellbook')
parser.add_argument('--amqp-ip', help='Overrides what is stored in Vault for the amqp ip.')
parser.add_argument('--shared-models', required=False, help='Should be set to true if the data/ folder is shared between golem instances or in a docker container.', default=False, type=bool)
parser.add_argument('--gpu-type', help='The type of GPU the system has onboard', default='nvidia', choices=['nvidia', 'nogpu'])
args = parser.parse_args()
vault_client, vault_data = connect_to_vault(args.vault_host, args.vault_token_file, args.vault_root)
# connect to amqp
amqp_ip = args.amqp_ip if args.amqp_ip != None else vault_data['host']
amqp_params = {
'amqp_ip': amqp_ip,
'amqp_user': vault_data['username'],
'amqp_password': vault_data['password'],
'amqp_vhost': vault_data['vhost']
}
server_name = args.server_id
server_id = 'golem_' + hashlib.sha256(server_name.encode()).hexdigest()[:10]
# load config files
all_skills, all_configs, all_models, all_repos, script_map, loaded_handlers = load_configs('modules', vault_client, args.vault_root, server_id, args.gpu_type)
# load enabled models json to dict
enabled_skills_dict = load_enabled_skills(server_id)
# start threads
start_worker_threads(all_skills, enabled_skills_dict, amqp_params, script_map, server_id)
# connect to rabbit mq
amqp_connected, amqp_connection, amqp_channel = connect_to_amqp(**amqp_params)
if amqp_connected == False:
fatal_error('unable to connect to amqp server')
# create dead letter exchange and queue
create_exchange(amqp_channel, 'deadletter')
dlx_queue = create_queue(channel=amqp_channel, queue_name='deadletters')
bind_queue_to_exchange(amqp_channel, 'deadletters', 'deadletter')
# create exchange and queue for this server
create_exchange(amqp_channel, 'golem')
create_exchange(amqp_channel, 'golem_broadcast', 'fanout')
create_exchange(amqp_channel, 'arcane_bridge_broadcast', 'fanout')
create_queue(channel=amqp_channel, queue_name=server_id, is_auto_delete=True, dlx="deadletter")
bind_queue_to_exchange(amqp_channel, server_id, 'golem')
bind_queue_to_exchange(amqp_channel, server_id, 'golem_broadcast')
# start all the pipe threads
create_exchange(amqp_channel, 'golem_skill')
# define server call back for answering messages
def server_callback(ch, method, properties, body):
global all_skills, all_configs, all_models, all_repos, script_map, loaded_handlers
if "command" not in properties.headers or "return_routing_key" not in properties.headers or "return_exchange" not in properties.headers:
logger.info("command or return routing not found in header. command, return_route_key, and return_exchange are required headers")
amqp_channel.basic_reject(delivery_tag=method.delivery_tag, requeue=False)
return
logger.info(f"incoming command {properties.headers['command']}")
try:
headers = {}
command = properties.headers.get('command')
return_key = properties.headers.get('return_routing_key')
return_exchange = properties.headers.get('return_exchange')
for key, value in properties.headers.items():
# Exclude return_exchange and return_routing_key
if key not in ['return_exchange', 'return_routing_key', 'x-delay']:
headers[key] = value
if command == "system_info":
installed_models, installed_repos, downloading_models = check_data_directories(all_models, all_repos)
# get list of installed models
system_info = get_system_info(server_id, args.gpu_type)
system_info["server_id"] = server_id
system_info["server_label"] = server_id.replace("_", "-")
system_info["installed_models"] = installed_models
system_info["downloading_models"] = downloading_models
system_info["installed_repository"] = installed_repos
system_info["handlers"] = loaded_handlers
# protect secrets from the UI
stripped_skills = [{k: v for k, v in skill.items() if k != "secrets"} for skill in all_skills]
system_info["installed_skills"] = stripped_skills
running_skills = []
system_info["status"] = "ONLINE"
worker_threads = get_worker_threads()
for thread in worker_threads:
thread_status = thread["thread_status"].raw.decode().rstrip('\0')
if thread_status != "ONLINE":
system_info["status"] = "STARTING"
running_skills.extend([{"device":thread["device"], "routing_key": thread["routing_key"],
"ram": thread["ram"] * 1000000, "use_precision": thread["use_precision"],
"thread_status": thread_status }])
system_info["running_skills"] = running_skills
send_message_to_exchange(amqp_channel, return_exchange, return_key, json.dumps(system_info).encode(), headers)
amqp_channel.basic_ack(delivery_tag=method.delivery_tag)
return
elif command == "run_skill":
skill_details = json.loads(body)
add_skill(skill_details, server_id)
run_map = {skill_details["routing_key"]: [skill_details]}
start_worker_threads(all_skills, run_map, amqp_params, script_map, server_id)
amqp_channel.basic_ack(delivery_tag=method.delivery_tag)
| return |
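# Editor's note: the sketch below is illustrative and not part of the repository. It shows how
# a client could satisfy the header contract enforced by server_callback above (command,
# return_routing_key and return_exchange are mandatory). The broker credentials, the reply
# exchange/routing key and the target server_id are assumptions.
from application.amqp import connect_to_amqp, send_message_to_exchange

connected, connection, channel = connect_to_amqp("127.0.0.1", "guest", "guest", "/")
if connected:
    headers = {
        "command": "system_info",
        "return_exchange": "arcane_bridge_broadcast",  # hypothetical reply exchange
        "return_routing_key": "my_reply_queue",        # hypothetical reply routing key
    }
    # the routing key must be the target server_id ("golem_" + 10-char sha256 prefix)
    send_message_to_exchange(channel, "golem", "golem_0123456789", "{}", headers)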
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: m4rkw/monzo-utils
# Path: monzo_utils/model/base.py
class BaseModel:
def __init__(self, attrs=None):
self.table = re.sub(r'(?<!^)(?=[A-Z])', '_', type(self).__name__).lower()
if attrs:
self.attrs = attrs
else:
self.attrs = {}
if 'id' not in self.attrs:
self.attrs['id'] = None
self.factory_query = False
def __getattr__(self, name):
if name in self.attrs:
return self.attrs[name]
match = re.match('^find_by_(.*?)$', name)
if match:
method_name = f"find_{self.table}_by_{match.group(1)}"
def find_object_by_fields(*args, **kwargs):
record = getattr(DB(), method_name)(*args, **kwargs)
if record:
return type(self)(record)
return record
return find_object_by_fields
match = re.match('^find_all_by_(.*?)$', name)
if match:
method_name = f"find_all_{self.table}_by_{match.group(1)}"
def find_objects_by_fields(*args, **kwargs):
objects = []
for record in getattr(DB(), method_name)(*args, **kwargs):
objects.append(type(self)(record))
return objects
return find_objects_by_fields
print("DB class method missing: %s" % (name))
sys.exit(1)
def __setattr__(self, name, value):
if name not in ['table','attrs']:
self.attrs[name] = value
else:
super().__setattr__(name, value)
def __delattr__(self, name):
self.attrs.pop(name)
def __str__(self):
for_display = {}
for key in self.attrs:
if type(self.attrs[key]) == datetime.date:
for_display[key] = self.attrs[key].strftime('%Y-%m-%d')
elif type(self.attrs[key]) == datetime.datetime:
for_display[key] = self.attrs[key].strftime('%Y-%m-%d %H:%M:%S')
elif type(self.attrs[key]) == decimal.Decimal:
for_display[key] = float(self.attrs[key])
else:
for_display[key] = self.attrs[key]
return json.dumps(for_display,indent=4)
def related(self, model, key_field, parent_id, orderby, orderdir, limit, deleted=None):
table = model.lower()
sql = f"select * from `{table}` where {key_field} = %s"
params = [parent_id]
if deleted is not None:
sql += f" and deleted = %s"
params.append(deleted)
sql += f" order by {orderby} {orderdir}"
if limit:
sql += " limit %s"
params.append(limit)
related = []
for row in DB().query(sql, params):
related.append(getattr(importlib.import_module(f"monzo_utils.model.{table}"), model)(row))
return related
def update(self, attrs):
self.attrs.update(attrs)
def save(self):
if self.id:
DB().update(self.table, self.id, self.attrs)
else:
self.id = DB().create(self.table, self.attrs)
def delete(self):
if self.id is None:
raise Exception("Unable to delete record with null id")
DB().query(f"delete from {self.table} where id = %s", [self.id])
def factory(self):
if self.factory_query is False:
DB().find(self.table)
self.factory_query = True
def select(self, select):
self.factory()
DB().select(select)
return self
def join(self, join_table):
if join_table not in self.RELATIONSHIPS:
raise Exception(f"no relationship defined between {self.table} and {join_table}")
self.factory()
DB().join(join_table, self.RELATIONSHIPS[join_table][0], self.RELATIONSHIPS[join_table][1])
return self
def leftJoin(self, join_table, where=None):
if join_table not in self.RELATIONSHIPS:
raise Exception(f"no relationship defined between {self.table} and {join_table}")
self.factory()
DB().leftJoin(join_table, self.RELATIONSHIPS[join_table][0], self.RELATIONSHIPS[join_table][1], where)
return self
def where(self, clause, params):
self.factory()
DB().where(clause, params)
return self
def andWhere(self, clause, params):
self.factory()
DB().andWhere(clause, params)
return self
def groupBy(self, group_by):
self.factory()
DB().groupBy(group_by)
return self
def orderBy(self, orderby, orderdir):
self.factory()
DB().orderBy(orderby, orderdir)
return self
def getall(self):
self.factory()
return DB().getall()
def getone(self):
self.factory()
return DB().getone()
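# Editor's note: illustrative sketch, not part of the repository. BaseModel.__getattr__
# synthesises find_by_* / find_all_by_* lookups from attribute names, so a subclass such as
# the Account model shown further below gets them for free. The column names and values are
# assumptions, and DB() must already have been constructed with a valid config.
from monzo_utils.model.account import Account

account = Account().find_by_name("Joint")           # delegates to DB().find_account_by_name(...)
accounts = Account().find_all_by_type("current")    # delegates to DB().find_all_account_by_type(...)
if account:
    account.update({"balance": 100})
    account.save()                                   # issues an UPDATE because account.id is set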
# Path: monzo_utils/lib/db.py
class DB(metaclass=Singleton):
def __init__(self, db_config=None, config_path=None):
if db_config:
self.config = db_config
else:
self.config = Config(None, config_path).db
self.driver = getattr(importlib.import_module(f"monzo_utils.lib.db_driver.{self.config['driver']}"), self.config['driver'])(self.config)
self.columns = {}
def __getattr__(self, name):
match = re.match('^find_([\w]+)_by_(.*?)$', name)
if match:
table = match.group(1)
if table[0:4] == 'all_':
table = table[4:]
find_all = True
else:
find_all = False
fields = match.group(2).split('_and_')
def find_object_by_fields(*args, **kwargs):
sql = "select * from `" + table + "` where ("
sql_args = []
for i in range(0, len(fields)):
if i >0:
sql += " and "
if type(args[i]) == list:
sql += "("
for j in range(0, len(args[i])):
if j >0:
sql += " or "
if 'search' in kwargs and type(kwargs['search']) == list and fields[i] in kwargs['search']:
sql += f"`{fields[i]}` like %s"
sql_args.append('%' + args[i][j] + '%')
else:
sql += f"`{fields[i]}` = %s"
sql_args.append(args[i][j])
sql += ")"
else:
if 'search' in kwargs and type(kwargs['search']) == list and fields[i] in kwargs['search']:
sql += "`" + fields[i] + "` like %s"
sql_args.append('%' + args[i] + '%')
else:
sql += "`" + fields[i] + "` = %s"
sql_args.append(args[i])
sql += ")"
if 'where' in kwargs:
for where_clause in kwargs['where']:
sql += f" and {where_clause['clause']}"
if 'params' in where_clause:
sql_args += where_clause['params']
if 'orderby' in kwargs:
sql += f" order by {kwargs['orderby']}"
if 'orderdir' in kwargs:
sql += f" {kwargs['orderdir']}"
if 'limit' in kwargs:
sql += f" limit {kwargs['limit']}"
if find_all:
return self.query(sql, sql_args)
else:
return self.one(sql, sql_args)
return find_object_by_fields
else:
print("DB class method missing: %s" % (name))
sys.exit(1)
def json_params(self, params):
json_params = []
for param in params:
if type(param) == datetime.date:
json_params.append(param.strftime('%Y-%m-%d'))
elif type(param) == datetime.datetime:
json_params.append(param.strftime('%Y-%m-%d %H:%M:%S'))
else:
json_params.append(param)
return json_params
def query(self, sql, params=[]):
if 'DEBUG' in os.environ and os.environ['DEBUG'] == '1':
print("SQL: %s" % (sql))
print("PARAMS: %s" % (json.dumps(self.json_params(params),indent=4)))
result = self.driver.query(sql, params)
if type(result) == list:
rows = []
for row in result:
rows.append(self.fix_dates(row))
result = rows
return result
def fix_dates(self, row):
fixed_row = {}
for key in row:
if type(row[key]) == str:
m = re.match('^([\d]{4})-([\d]{2})-([\d]{2})$', row[key])
if m:
fixed_row[key] = datetime.date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
continue
m = re.match('^([\d]{4})-([\d]{2})-([\d]{2}) ([\d]{2}):([\d]{2}):([\d]{2})$', row[key])
if m:
fixed_row[key] = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)), int(m.group(5)), int(m.group(6)))
continue
fixed_row[key] = row[key]
return fixed_row
def one(self, sql, params=[]):
rows = self.query(sql, params)
if len(rows) >0:
return rows[0]
return False
def find(self, table):
self.query_table = table
self.sel = []
self.whereClauses = []
self.whereParams = []
self.andWhereClauses = []
self._orderBy = None
self._orderDir = None
self._join = []
self._leftJoin = []
self._groupBy = None
return self
def select(self, select):
self.sel.append(select)
return self
def where(self, where, whereParams):
self.whereClauses.append(where)
self.whereParams += whereParams
return self
def andWhere(self, where, whereParams):
self.andWhereClauses.append(where)
self.whereParams += whereParams
return self
def orderBy(self, field, direction='asc'):
self._orderBy = field
self._orderDir = direction
return self
def join(self, join_table, join_left_col, join_right_col=None):
if join_right_col:
self._join.append({
'table': join_table,
'join_left_col': join_left_col,
'join_right_col': join_right_col
})
else:
self._join.append({
'table': join_table,
'clause': join_left_col
})
return self
def leftJoin(self, join_table, join_left_col, join_right_col, where=None):
self._leftJoin.append({
'table': join_table,
'join_left_col': join_left_col,
'join_right_col': join_right_col,
'where': where
})
return self
def orWhere(self, whereClause, whereParams=[]):
self.whereType = 'or'
return self.where(whereClause, whereParams)
def groupBy(self, groupBy):
self._groupBy = groupBy
return self
def prepare(self):
if self.sel == []:
select = '*'
else:
select = ''
for i in range(0, len(self.sel)):
if i >0:
select += ','
select += f"{self.sel[i]}"
sql = "select " + select + " from `" + self.query_table + "`"
for join in self._join:
sql += " join `" + join['table'] + "` on "
if 'clause' in join:
sql += join['clause']
else:
sql += join['join_left_col'] + " = " + join['join_right_col']
for join in self._leftJoin:
sql += " left join `" + join['table'] + "` on "
if 'clause' in join:
sql += join['clause']
else:
sql += join['join_left_col'] + " = " + join['join_right_col']
if len(self.whereClauses) >0:
sql += " where ("
for i in range(0, len(self.whereClauses)):
if i >0:
sql += " or "
sql += self.whereClauses[i]
sql += ")"
for i in range(0, len(self.andWhereClauses)):
sql += " and (" + self.andWhereClauses[i] + ") "
if self._groupBy:
sql += " group by " + self._groupBy
if self._orderBy:
sql += " order by "
order_by_fields = self._orderBy.split(',')
for i in range(0, len(order_by_fields)):
if i >0:
sql += ","
sql += f" `{order_by_fields[i].strip()}`"
if self._orderDir:
sql += " " + self._orderDir
return sql
def getone(self):
sql = self.prepare() + " limit 1"
return self.one(sql, self.whereParams)
def getall(self):
rows = []
for row in self.query(self.prepare(), self.whereParams):
rows.append(row)
return rows
def get_raw_query(self):
sql = self.prepare()
raw_sql = ''
n = 0
skip = False
for i in range(0, len(sql)):
if skip:
skip = False
continue
if sql[i:i+2] == '%s':
raw_sql += "'" + self.whereParams[n] + "'"
n += 1
skip = True
else:
raw_sql += sql[i]
return raw_sql
def update(self, table, _id, data):
if table not in self.columns:
self.columns[table] = self.driver.get_columns(table, exclude=['id'])
sql = f"update `{table}` set"
params = []
for i in range(0, len(self.columns[table])):
if i >0:
sql += ", "
sql += f" `{self.columns[table][i]}` = %s"
params.append(data[self.columns[table][i]] if self.columns[table][i] in data else None)
sql += f" where `id` = %s"
params.append(_id)
self.query(sql, params)
def create(self, table, data):
if table not in self.columns:
self.columns[table] = self.driver.get_columns(table, exclude=['id'])
sql = f"insert into `{table}` ("
params = []
for i in range(0, len(self.columns[table])):
if i >0:
sql += ","
sql += f"`{self.columns[table][i]}`"
params.append(data[self.columns[table][i]] if self.columns[table][i] in data else None)
sql += f") VALUES ("
for i in range(0, len(self.columns[table])):
if i >0:
sql += ","
sql += "%s"
sql += ")"
return self.query(sql, params)
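# Editor's note: illustrative sketch, not part of the repository, showing the fluent
# query-builder side of DB (find/select/where/orderBy/getall). The table and column names
# are assumptions, and DB() must already have been constructed with a valid driver config.
from monzo_utils.lib.db import DB

rows = (
    DB()
    .find("transaction")
    .select("id, description, money_out")
    .where("money_out > %s", [0])
    .orderBy("created_at", "desc")
    .getall()
)
latest = DB().find("account").where("name = %s", ["Joint"]).getone()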
# Path: monzo_utils/model/account.py
import sys
from monzo_utils.model.base import BaseModel
from monzo_utils.lib.db import DB
class Account(BaseModel):
DISPLAY_KEYS = ['name','sortcode','account_no','balance','available']
def __init__(self, attrs={}):
super().__init__(attrs)
def transactions(self, orderby='created_at', orderdir='asc', limit=None):
return super().related('Transaction', 'account_id', self.id, orderby, orderdir, limit)
def pots(self, orderby='name', orderdir='asc', limit=None):
return super().related('Pot', 'account_id', self.id, orderby, orderdir, limit, deleted=0)
@property
def __dict__(self):
attrs = {'attrs': self.attrs}
for pot in self.pots(orderby='name'):
attrs['attrs'][pot.name] = pot.balance
return attrs
@property
| def keys(self): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: rossiyareich/inknhue
# Path: src/conditional/conditional_dataset.py
class ConditionalDataset(Dataset):
def __init__(self, dataset_path, transform=None):
self.dataset_path = dataset_path
self.transform = transform
self.cond_dataset = []
style2paints = get_entries(f"{dataset_path}/style2paints/*.png")
colored = get_entries(f"{dataset_path}/colored/*.png")
assert len(style2paints) == len(colored)
for s, c in zip(style2paints, colored):
self.cond_dataset.append({"style2paints": s, "colored": c})
def __len__(self):
return len(self.cond_dataset)
def __getitem__(self, idx):
s = Image.open(self.cond_dataset[idx]["style2paints"]).convert("RGB")
c = Image.open(self.cond_dataset[idx]["colored"]).convert("RGB")
g = c.convert("L").convert("RGB")
if self.transform is not None:
return self.transform(g, s, c)
return g, s, c
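# Editor's note: illustrative sketch, not part of the repository. ConditionalDataset pairs
# <dataset_path>/style2paints/*.png with <dataset_path>/colored/*.png and yields
# (grayscale, style2paints, colored) triples; the path below is an assumption.
dataset = ConditionalDataset("data/conditional")
g, s, c = dataset[0]   # PIL RGB images: grayscale copy, style2paints render, colored target
print(len(dataset), g.size, s.size, c.size)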
# Path: src/conditional/conditional_decoder.py
class ConditionalDecoder(nn.Module):
def __init__(
self,
*,
channels: int,
channel_multipliers: List[int],
n_resnet_blocks: int,
out_channels: int,
z_channels: int
) -> None:
super().__init__()
# Number of blocks of different resolutions.
# The resolution is halved at the end each top level block
num_resolutions = len(channel_multipliers)
# Number of channels in each top level block, in the reverse order
channels_list = [m * channels for m in channel_multipliers]
# Number of channels in the top-level block
channels = channels_list[-1]
# Initial $3 \times 3$ convolution layer that maps the embedding space to `channels`
self.conv_in = nn.Conv2d(z_channels, channels, 3, stride=1, padding=1)
# ResNet blocks with attention
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(channels, channels)
self.mid.attn_1 = AttnBlock(channels)
self.mid.block_2 = ResnetBlock(channels, channels)
# List of top-level blocks
self.up = nn.ModuleList()
# Create top-level blocks
for i in reversed(range(num_resolutions)):
# Each top level block consists of multiple ResNet Blocks and up-sampling
resnet_blocks = nn.ModuleList()
# Add ResNet Blocks
for _ in range(n_resnet_blocks + 1):
resnet_blocks.append(ResnetBlock(channels, channels_list[i]))
channels = channels_list[i]
# Top-level block
up = nn.Module()
up.block = resnet_blocks
# Up-sampling at the end of each top level block except the first
if i != 0:
up.upsample = UpSample(channels)
else:
up.upsample = nn.Identity()
# Prepend to be consistent with the checkpoint
self.up.insert(0, up)
# Map to image space with a $3 \times 3$ convolution
self.norm_out = normalization(channels)
self.conv_out = nn.Conv2d(channels, out_channels, 3, stride=1, padding=1)
def forward(self, z: torch.Tensor, conds_z: List[torch.Tensor]) -> torch.Tensor:
# Map to `channels` with the initial convolution
h = self.conv_in(z)
# ResNet blocks with attention
h = self.mid.block_1(h)
h = self.mid.attn_1(h)
h = self.mid.block_2(h)
# Top-level blocks
for up, cond_z in reversed(list(zip(self.up, conds_z))):
# ResNet Blocks
for block in up.block:
h = block(h)
h += cond_z
# Up-sampling
h = up.upsample(h)
# Normalize and map to image space
h = self.norm_out(h)
h = swish(h)
img = self.conv_out(h)
return img
# Path: src/conditional/conditional_encoder.py
class ConditionalEncoder(nn.Module):
def __init__(
self,
*,
channels: int,
channel_multipliers: List[int],
n_resnet_blocks: int,
in_channels: int,
) -> None:
super().__init__()
# Number of blocks of different resolutions.
# The resolution is halved at the end each top level block
n_resolutions = len(channel_multipliers)
# Initial $3 \times 3$ convolution layer that maps the image to `channels`
self.conv_in = nn.Conv2d(in_channels, channels, 3, stride=1, padding=1)
# Number of channels in each top level block
channels_list = [m * channels for m in [1] + channel_multipliers]
# List of top-level blocks
self.down = nn.ModuleList()
self.proj = nn.ModuleList()
# Create top-level blocks
for i in range(n_resolutions):
# Each top level block consists of multiple ResNet Blocks and down-sampling
resnet_blocks = nn.ModuleList()
# Add ResNet Blocks
for _ in range(n_resnet_blocks):
resnet_blocks.append(ResnetBlock(channels, channels_list[i + 1]))
channels = channels_list[i + 1]
# Top-level block
down = nn.Module()
down.block = resnet_blocks
# Down-sampling at the end of each top level block except the last
if i != n_resolutions - 1:
down.downsample = DownSample(channels)
else:
down.downsample = nn.Identity()
self.down.append(down)
# Projection
proj = nn.Conv2d(channels, channels, 1, 1, 0)
proj = zero_module(proj)
self.proj.append(proj)
def forward(self, cond: torch.Tensor) -> List[torch.Tensor]:
# Map to `channels` with the initial convolution
x = self.conv_in(cond)
conds_z = []
# Top-level blocks
for down, proj in zip(self.down, self.proj):
# ResNet Blocks
for block in down.block:
x = block(x)
conds_z.append(proj(x))
# Down-sampling
x = down.downsample(x)
return conds_z
# Path: src/encoder.py
class Encoder(nn.Module):
"""
## Encoder module
"""
def __init__(
self,
*,
channels: int,
channel_multipliers: List[int],
n_resnet_blocks: int,
in_channels: int,
z_channels: int
):
"""
:param channels: is the number of channels in the first convolution layer
:param channel_multipliers: are the multiplicative factors for the number of channels in the
subsequent blocks
:param n_resnet_blocks: is the number of resnet layers at each resolution
:param in_channels: is the number of channels in the image
:param z_channels: is the number of channels in the embedding space
"""
super().__init__()
# Number of blocks of different resolutions.
# The resolution is halved at the end each top level block
n_resolutions = len(channel_multipliers)
# Initial $3 \times 3$ convolution layer that maps the image to `channels`
self.conv_in = nn.Conv2d(in_channels, channels, 3, stride=1, padding=1)
# Number of channels in each top level block
channels_list = [m * channels for m in [1] + channel_multipliers]
# List of top-level blocks
self.down = nn.ModuleList()
# Create top-level blocks
for i in range(n_resolutions):
# Each top level block consists of multiple ResNet Blocks and down-sampling
resnet_blocks = nn.ModuleList()
# Add ResNet Blocks
for _ in range(n_resnet_blocks):
resnet_blocks.append(ResnetBlock(channels, channels_list[i + 1]))
channels = channels_list[i + 1]
# Top-level block
down = nn.Module()
down.block = resnet_blocks
# Down-sampling at the end of each top level block except the last
if i != n_resolutions - 1:
down.downsample = DownSample(channels)
else:
down.downsample = nn.Identity()
self.down.append(down)
# Final ResNet blocks with attention
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(channels, channels)
self.mid.attn_1 = AttnBlock(channels)
self.mid.block_2 = ResnetBlock(channels, channels)
# Map to embedding space with a $3 \times 3$ convolution
self.norm_out = normalization(channels)
self.conv_out = nn.Conv2d(channels, 2 * z_channels, 3, stride=1, padding=1)
def forward(self, img: torch.Tensor):
"""
:param img: is the image tensor with shape `[batch_size, img_channels, img_height, img_width]`
"""
# Map to `channels` with the initial convolution
x = self.conv_in(img)
# Top-level blocks
for down in self.down:
# ResNet Blocks
for block in down.block:
x = block(x)
# Down-sampling
x = down.downsample(x)
# Final ResNet blocks with attention
x = self.mid.block_1(x)
x = self.mid.attn_1(x)
x = self.mid.block_2(x)
# Normalize and map to embedding space
x = self.norm_out(x)
x = swish(x)
x = self.conv_out(x)
return x
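# Editor's note: illustrative sketch, not part of the repository; the channel sizes below are
# arbitrary assumptions, chosen only to show the docstring parameters in use.
import torch

encoder = Encoder(
    channels=64,
    channel_multipliers=[1, 2, 4],   # three resolutions, so the input is halved twice
    n_resnet_blocks=2,
    in_channels=3,
    z_channels=4,
)
img = torch.randn(1, 3, 64, 64)
moments = encoder(img)               # -> [1, 2 * z_channels, 16, 16], mean and log-variance stacked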
# Path: src/gaussian_distribution.py
class GaussianDistribution:
"""
## Gaussian Distribution
"""
def __init__(self, parameters):
"""
:param parameters: are the means and log of variances of the embedding of shape
`[batch_size, z_channels * 2, z_height, z_height]`
"""
self.parameters = parameters
# Split mean and log of variance
self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
# Clamp the log of variances
self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
# Calculate standard deviation
self.std = torch.exp(0.5 * self.logvar)
self.var = torch.exp(self.logvar)
def sample(self):
# Sample from the distribution
x = self.mean + self.std * torch.randn_like(
self.std, dtype=self.std.dtype, device=self.std.device
)
return x
def kl(self, other=None):
if other is None:
return 0.5 * torch.sum(
torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,
dim=[1, 2, 3],
)
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean, 2) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar,
dim=[1, 2, 3],
)
def nll(self, sample, dims=[1, 2, 3]):
logtwopi = np.log(2.0 * np.pi)
return 0.5 * torch.sum(
logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
dim=dims,
)
def mode(self):
return self.mean
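# Editor's note: continuation of the illustrative encoder sketch above, not part of the
# repository. GaussianDistribution splits the encoder output into mean and log-variance,
# samples with the reparameterisation trick, and can score the KL term.
posterior = GaussianDistribution(moments)   # moments: [1, 2 * z_channels, h, w]
z = posterior.sample()                      # [1, z_channels, h, w]
kl = posterior.kl()                         # per-sample KL against a unit Gaussian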
# Path: src/perceptual_loss.py
class LPIPSWithDiscriminator(nn.Module):
def __init__(
self,
disc_start,
disc_num_layers=3,
disc_in_channels=3,
disc_factor=1.0,
disc_weight=1.0,
disc_loss="hinge",
kl_weight=1.0,
perceptual_weight=1.0,
use_actnorm=False,
):
super().__init__()
assert disc_loss in ["hinge", "vanilla"]
self.discriminator = NLayerDiscriminator(
input_nc=disc_in_channels, n_layers=disc_num_layers, use_actnorm=use_actnorm
).apply(weights_init)
self.discriminator_iter_start = disc_start
self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss
self.disc_factor = disc_factor
self.discriminator_weight = disc_weight
# self.kl_weight = kl_weight
self.perceptual_loss = LPIPS().eval()
self.perceptual_weight = perceptual_weight
self.logvar = nn.Parameter(torch.ones(size=()) * 0)
def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer):
nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
d_weight *= self.discriminator_weight
return d_weight
def forward(
self,
inputs,
reconstructions,
posteriors,
optimizer_idx,
global_step,
last_layer,
cond,
):
rec_loss = torch.abs(cond.contiguous() - reconstructions.contiguous())
if self.perceptual_weight > 0:
p_loss = self.perceptual_loss(
cond.contiguous(), reconstructions.contiguous()
)
rec_loss += self.perceptual_weight * p_loss
nll_loss = torch.mean(rec_loss)
if optimizer_idx == 0:
logits_fake = self.discriminator(reconstructions.contiguous())
g_loss = -torch.mean(logits_fake)
if self.disc_factor > 0.0:
try:
d_weight = self.calculate_adaptive_weight(
nll_loss, g_loss, last_layer=last_layer
)
except RuntimeError:
assert not self.training
d_weight = torch.tensor(0.0)
else:
d_weight = torch.tensor(0.0)
disc_factor = adopt_weight(
self.disc_factor, global_step, threshold=self.discriminator_iter_start
)
loss = nll_loss + d_weight * disc_factor * g_loss
log = {
"total_loss": loss.clone().detach().mean(),
"rec_loss": rec_loss.detach().mean(),
"d_weight": d_weight.detach(),
"disc_factor": torch.tensor(disc_factor),
"g_loss": g_loss.detach().mean(),
}
return loss, log
if optimizer_idx == 1:
logits_real = self.discriminator(cond.contiguous().detach())
logits_fake = self.discriminator(reconstructions.contiguous().detach())
disc_factor = adopt_weight(
self.disc_factor, global_step, threshold=self.discriminator_iter_start
)
d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
log = {
"disc_loss": d_loss.clone().detach().mean(),
"logits_real": logits_real.detach().mean(),
"logits_fake": logits_fake.detach().mean(),
}
return d_loss, log
# Path: src/utils.py
def resize(img, size):
w, h = img.size
if w != size or h != size:
if w <= h:
h = int(float(h) * float(size) / float(w))
w = size
else:
w = int(float(w) * float(size) / float(h))
h = size
img = img.resize((w, h), Image.Resampling.LANCZOS)
return img
# Path: train.py
import argparse
import copy
import gc
import logging
import os
import numpy as np
import torch
import torch.backends.cuda
import torch.backends.cudnn
import torch.optim as optim
import torchvision.transforms.functional as VF
import wandb
from accelerate import Accelerator
from omegaconf import OmegaConf
from rich.traceback import install
from torch import nn
from torch.utils.data import DataLoader, Subset
from torchvision import transforms
from tqdm.auto import tqdm
from src.conditional.conditional_dataset import ConditionalDataset
from src.conditional.conditional_decoder import ConditionalDecoder
from src.conditional.conditional_encoder import ConditionalEncoder
from src.encoder import Encoder
from src.gaussian_distribution import GaussianDistribution
from src.perceptual_loss import LPIPSWithDiscriminator
from src.utils import resize
pil_to_tensor = transforms.PILToTensor()
g, s, c = pil_to_tensor(g), pil_to_tensor(s), pil_to_tensor(c)
g, s, c = (
((g / 255.0) * 2.0 - 1.0).clamp(-1, 1),
((s / 255.0) * 2.0 - 1.0).clamp(-1, 1),
((c / 255.0) * 2.0 - 1.0).clamp(-1, 1),
)
return g, s, c
cond_dataset = cond_dataset_full = ConditionalDataset(
dataset_path=conf.paths.dataset_path, transform=transform
)
if not conf.params.use_entire_dataset:
if conf.params.use_sequential_dataset:
cond_dataset_indices = np.arange(conf.params.dataset_size)
else:
cond_dataset_indices = np.random.choice(
len(cond_dataset), conf.params.dataset_size, replace=False
)
cond_dataset = Subset(cond_dataset_full, cond_dataset_indices)
cond_dataloader = DataLoader(
dataset=cond_dataset,
batch_size=conf.params.batch_size,
num_workers=min(12, conf.params.batch_size * 2),
shuffle=True,
)
# Setup optimizers
logging.info("Setting up optimizers")
if conf.params.unlock_decoder:
optimizer_g = optim.AdamW(
list(cond_encoder.parameters()) + list(cond_decoder.parameters()),
lr=conf.params.base_lr,
betas=tuple(conf.params.betas),
)
else:
optimizer_g = optim.AdamW(
cond_encoder.parameters(),
lr=conf.params.base_lr,
betas=tuple(conf.params.betas),
)
optimizer_d = optim.AdamW(
discriminator.parameters(),
lr=conf.params.base_lr,
betas=tuple(conf.params.betas),
)
# Training
logging.info("Start training")
def do_log():
logging.info(f"Logging to wandb for global step {global_step}")
colored = wandb.Image(((c[0] + 1.0) * 0.5).clamp(0, 1))
grayscale = wandb.Image(((g[0] + 1.0) * 0.5).clamp(0, 1))
style2paints = wandb.Image(((s[0] + 1.0) * 0.5).clamp(0, 1))
reconstruction = wandb.Image(((y[0] + 1.0) * 0.5).clamp(0, 1))
step_log.update(
{
"colored": colored,
"grayscale": grayscale,
"style2paints": style2paints,
"reconstruction": reconstruction,
}
)
wandb.log(step_log)
def do_checkpoint():
logging.info(f"Saving checkpoint for epoch {epoch}")
accelerator.wait_for_everyone()
torch.save(
{
"epoch": epoch,
"global_step": global_step,
"rec_loss": step_log["rec_loss"],
"cond_encoder_state_dict": accelerator.get_state_dict(cond_encoder),
"cond_decoder_state_dict": accelerator.get_state_dict(cond_decoder),
"discriminator_state_dict": accelerator.get_state_dict(discriminator),
"optimizer_g_state_dict": accelerator.get_state_dict(optimizer_g),
"optimizer_d_state_dict": accelerator.get_state_dict(optimizer_d),
},
f"{conf.paths.checkpoint_path}/model_epoch{epoch:06}.ckpt",
)
(
quant_conv,
post_quant_conv,
encoder,
cond_encoder,
cond_decoder,
discriminator,
cond_dataloader,
optimizer_g,
optimizer_d,
) = accelerator.prepare(
quant_conv,
post_quant_conv,
encoder,
cond_encoder,
cond_decoder,
discriminator,
cond_dataloader,
optimizer_g,
optimizer_d,
)
global_step = 0
for epoch in range(conf.params.epoch):
with tqdm(
cond_dataloader, unit="batch", disable=not accelerator.is_local_main_process
) as tepoch:
tepoch.set_description(f"Epoch {epoch}")
# Should checkpoint?
should_checkpoint = (
epoch % conf.params.checkpoint_epochs == 0
or epoch == conf.params.epoch - 1
) and accelerator.is_local_main_process
step_log = {}
| for batch, (g, s, c) in enumerate(tepoch): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: TencentBlueKing/bkflow-feel
# Path: bkflow_feel/data_models.py
class RangeGroupOperator(enum.Enum):
GT = "greater than"
GTE = "greater than or equal"
LT = "less than"
LTE = "less than or equal"
# Path: bkflow_feel/parsers.py
class AfterFunc(BinaryOperator):
def evaluate(self, context):
left_val = self.left.evaluate(context)
right_val = self.right.evaluate(context)
operator = None
if isinstance(self.left, RangeGroup):
if left_val.left_operator == RangeGroupOperator.GT:
operator = RangeGroupOperator.GTE
left_val = left_val.left_val
if isinstance(self.right, RangeGroup):
if right_val.right_operator == RangeGroupOperator.LT:
operator = RangeGroupOperator.GTE
right_val = right_val.right_val
if operator == RangeGroupOperator.GTE:
return left_val >= right_val
return left_val > right_val
# Path: bkflow_feel/parsers.py
class And(BinaryOperator):
def evaluate(self, context):
return self.left.evaluate(context) and self.right.evaluate(context)
# Path: bkflow_feel/parsers.py
class BeforeFunc(BinaryOperator):
def evaluate(self, context):
left_val = self.left.evaluate(context)
right_val = self.right.evaluate(context)
operator = None
if isinstance(self.left, RangeGroup):
if left_val.right_operator == RangeGroupOperator.LT:
operator = RangeGroupOperator.GTE
left_val = left_val.right_val
if isinstance(self.right, RangeGroup):
if right_val.left_operator == RangeGroupOperator.GT:
operator = RangeGroupOperator.GTE
right_val = right_val.left_val
if operator == RangeGroupOperator.GTE:
return left_val <= right_val
return left_val < right_val
# Path: bkflow_feel/parsers.py
class Between(Expression):
def __init__(self, value, left, right):
self.value = value
self.min = left
self.max = right
def evaluate(self, context):
value = self.value.evaluate(context)
return self.min.evaluate(context) <= value <= self.max.evaluate(context)
# Path: bkflow_feel/parsers.py
class Boolean(CommonExpression):
pass
# Path: bkflow_feel/parsers.py
class Context(Expression):
def __init__(self, pairs):
self.pairs = pairs
def evaluate(self, context):
return dict(pair.evaluate(context) for pair in self.pairs)
# Path: bkflow_feel/parsers.py
class ContextItem(Expression):
def __init__(self, expr, keys):
self.expr = expr
self.keys = keys
def evaluate(self, context):
result = self.expr.evaluate(context)
for key in self.keys:
if not isinstance(result, dict):
return None
result = result.get(key)
return result
# Path: bkflow_feel/parsers.py
class Date(CommonExpression):
def evaluate(self, context):
year, month, day = self.value.split("-")
return datetime.date(int(year), int(month), int(day))
# Path: bkflow_feel/parsers.py
class DateAndTime(Expression):
def __init__(self, date: Date, time: Time):
self.date = date
self.time = time
def evaluate(self, context):
date = self.date.evaluate(context)
time = self.time.evaluate(context)
return datetime.datetime.combine(date, time, tzinfo=time.tzinfo)
# Path: bkflow_feel/parsers.py
class DayOfWeekFunc(CommonExpression):
WEEKDAYS = [
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
]
def evaluate(self, context):
date_or_datetime = self.value.evaluate(context)
return self.WEEKDAYS[date_or_datetime.weekday()]
# Path: bkflow_feel/parsers.py
class Expr(CommonExpression):
def evaluate(self, context):
return self.value.evaluate(context)
# Path: bkflow_feel/parsers.py
class FuncInvocation(Expression):
def __init__(self, func_name, args=None, named_args=None):
self.func_name = func_name
self.args = args or []
self.named_args = named_args or {}
def evaluate(self, context):
try:
func = FEELFunctionsManager.get_func(self.func_name)
except Exception as e:
logger.exception(e)
func = None
if not func:
return None
if self.args:
params = [arg.evaluate(context) for arg in self.args]
return func(*params)
elif self.named_args:
params = {key: arg.evaluate(context) for key, arg in self.named_args.items()}
return func(**params)
return func()
# Path: bkflow_feel/parsers.py
class FunctionCall(Expression):
def __init__(self, name, args):
self.name = name
self.args = args
def evaluate(self, context):
function = context.get(self.name)
if function is None:
raise ValueError(f"Unknown function: {self.name}")
return function(*[arg.evaluate(context) for arg in self.args])
# Path: bkflow_feel/parsers.py
class In(BinaryOperator):
def evaluate(self, context):
left_val = self.left.evaluate(context)
right_val = self.right.evaluate(context)
if isinstance(self.right, RangeGroup):
left_operation = (
left_val > right_val.left_val
if right_val.left_operator == RangeGroupOperator.GT
else left_val >= right_val.left_val
)
right_operation = (
left_val < right_val.right_val
if right_val.right_operator == RangeGroupOperator.LT
else left_val <= right_val.right_val
)
return left_operation and right_operation
return left_val in right_val
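# Editor's note: illustrative sketch, not part of the repository, of how In evaluates a value
# against a RangeGroup: the two operators decide whether each bound is strict or inclusive.
# The context keys and values are assumptions.
from bkflow_feel.data_models import RangeGroupOperator
from bkflow_feel.parsers import In, RangeGroup, Variable

expr = In(
    Variable("x"),
    RangeGroup(Variable("lo"), Variable("hi"), RangeGroupOperator.GT, RangeGroupOperator.LTE),
)
expr.evaluate({"x": 10, "lo": 1, "hi": 10})   # True:  1 < 10 <= 10
expr.evaluate({"x": 1, "lo": 1, "hi": 10})    # False: the lower bound is exclusive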
# Path: bkflow_feel/parsers.py
class IncludesFunc(BinaryOperator):
def evaluate(self, context):
left_val: RangeGroupData = self.left.evaluate(context)
right_val = self.right.evaluate(context)
if isinstance(self.right, RangeGroup):
left_operation = left_val.left_val <= right_val.left_val
if left_val.left_operator == RangeGroupOperator.GT and right_val.left_operator == RangeGroupOperator.GTE:
left_operation = left_val.left_val < right_val.left_val
right_operation = left_val.right_val >= right_val.right_val
if left_val.right_operator == RangeGroupOperator.LT and right_val.right_operator == RangeGroupOperator.LTE:
right_operation = left_val.right_val > right_val.right_val
else:
left_operation = left_val.left_val <= right_val
if left_val.left_operator == RangeGroupOperator.GT:
left_operation = left_val.left_val < right_val
right_operation = left_val.right_val >= right_val
if left_val.right_operator == RangeGroupOperator.LT:
right_operation = left_val.right_val > right_val
return left_operation and right_operation
# Path: bkflow_feel/parsers.py
class List(Expression):
def __init__(self, *items):
self.items = items
def evaluate(self, context):
return [item.evaluate(context) for item in self.items]
# Path: bkflow_feel/parsers.py
class ListEvery(ListMatch):
def evaluate(self, context):
iter_pairs = self.evaluate_and_validate_iter_pairs(context)
for i in range(0, len(iter_pairs[0][1])):
tmp_context = {**context, **{pair[0]: pair[1][i] for pair in iter_pairs}}
if self.expr.evaluate(tmp_context) is False:
return False
return True
# Path: bkflow_feel/parsers.py
class ListFilter(Expression):
def __init__(self, list_expr, filter_expr):
self.list_expr = list_expr
self.filter_expr = filter_expr
def evaluate(self, context):
items = self.list_expr.evaluate(context)
if not isinstance(items, list):
return None
result = []
for item in items:
try:
# when item is a dict and the key compared in the filter is missing, evaluation may raise
if self.filter_expr.evaluate(item if isinstance(item, dict) else {"item": item}):
result.append(item)
except Exception as e:
logger.exception(e)
pass
return result
# Path: bkflow_feel/parsers.py
class ListItem(Expression):
def __init__(self, list_expr, index):
self.list_expr = list_expr
self.index = index
def evaluate(self, context):
items = self.list_expr.evaluate(context)
if not isinstance(items, list) or self.index == 0 or len(items) < abs(self.index):
return None
items = items[self.index - 1] if self.index > 0 else items[self.index]
return items
# Path: bkflow_feel/parsers.py
class ListOperator(Expression):
def __init__(self, operation, *expr):
self.operation = operation
self.expr = expr
def evaluate(self, context):
return getattr(self, self.operation)(context)
def list_contains(self, context):
list_ = self.expr[0].evaluate(context)
item = self.expr[1].evaluate(context)
return item in list_
def list_count(self, context):
list_ = self.expr[0].evaluate(context)
return len(list_)
def list_all(self, context):
list_ = self.expr[0].evaluate(context)
return all(list_)
def list_any(self, context):
list_ = self.expr[0].evaluate(context)
return any(list_)
# Path: bkflow_feel/parsers.py
class ListSome(ListMatch):
def evaluate(self, context):
iter_pairs = self.evaluate_and_validate_iter_pairs(context)
for i in range(0, len(iter_pairs[0][1])):
tmp_context = {**context, **{pair[0]: pair[1][i] for pair in iter_pairs}}
if self.expr.evaluate(tmp_context) is True:
return True
return False
# Path: bkflow_feel/parsers.py
class MonthOfYearFunc(CommonExpression):
MONTH_MAPPING = {
1: "January",
2: "February",
3: "March",
4: "April",
5: "May",
6: "June",
7: "July",
8: "Auguest",
9: "September",
10: "October",
11: "November",
12: "December",
}
def evaluate(self, context):
date_or_datetime = self.value.evaluate(context)
return self.MONTH_MAPPING[date_or_datetime.month]
# Path: bkflow_feel/parsers.py
class Not(Expression):
def __init__(self, value):
self.value = value
def evaluate(self, context):
return not self.value.evaluate(context)
# Path: bkflow_feel/parsers.py
class NotEqual(BinaryOperator):
def evaluate(self, context):
return self.left.evaluate(context) != self.right.evaluate(context)
# Path: bkflow_feel/parsers.py
class NowFunc(Expression):
def evaluate(self, context):
# TODO: timezone handling needs to be configurable
return datetime.datetime.now()
# Path: bkflow_feel/parsers.py
class Null(Expression):
def evaluate(self, context):
return None
# Path: bkflow_feel/parsers.py
class Number(CommonExpression):
pass
# Path: bkflow_feel/parsers.py
class Or(BinaryOperator):
def evaluate(self, context):
return self.left.evaluate(context) or self.right.evaluate(context)
# Path: bkflow_feel/parsers.py
class Pair(Expression):
def __init__(self, key, value):
self.key = key
self.value = value
def evaluate(self, context):
return self.key.evaluate(context), self.value.evaluate(context)
# Path: bkflow_feel/parsers.py
class RangeGroup(BinaryOperator):
def __init__(self, left, right, left_operator, right_operator):
self.left = left
self.right = right
self.left_operator = left_operator
self.right_operator = right_operator
def evaluate(self, context):
left_val = self.left.evaluate(context)
right_val = self.right.evaluate(context)
data = {
"left_val": left_val,
"right_val": right_val,
"left_operator": self.left_operator,
"right_operator": self.right_operator,
}
return RangeGroupData(**data)
# Path: bkflow_feel/parsers.py
class SameTypeBinaryOperator(BinaryOperator):
validator_cls = BinaryOperationValidator
def __init__(self, operation, left, right):
super().__init__(left, right)
self.operation = operation
def evaluate(self, context):
left_val = self.left.evaluate(context)
right_val = self.right.evaluate(context)
self.validator_cls()(left_val, right_val)
return getattr(self, self.operation)(left_val, right_val)
def add(self, left_val, right_val):
return left_val + right_val
def subtract(self, left_val, right_val):
return left_val - right_val
def multiply(self, left_val, right_val):
return left_val * right_val
def divide(self, left_val, right_val):
return left_val / right_val
def power(self, left_val, right_val):
return left_val**right_val
def equal(self, left_val, right_val):
return left_val == right_val
def less_than(self, left_val, right_val):
return left_val < right_val
def greater_than(self, left_val, right_val):
return left_val > right_val
def less_than_or_equal(self, left_val, right_val):
return left_val <= right_val
def greater_than_or_equal(self, left_val, right_val):
return left_val >= right_val
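# Note on the dispatch style used above: evaluate() resolves self.operation to a method via
# getattr after the operands pass the shared type validator (defined elsewhere in the repo).
# A stripped-down, self-contained sketch of that pattern:
class _Dispatch:
    def __init__(self, operation):
        self.operation = operation
    def apply(self, a, b):
        return getattr(self, self.operation)(a, b)
    def add(self, a, b):
        return a + b
    def multiply(self, a, b):
        return a * b
assert _Dispatch("add").apply(2, 3) == 5
assert _Dispatch("multiply").apply(2, 3) == 6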
# Path: bkflow_feel/parsers.py
class String(CommonExpression):
pass
# Path: bkflow_feel/parsers.py
class StringOperator(BinaryOperator):
validator_cls = BinaryOperationValidator
def __init__(self, operation, left, right):
super().__init__(left, right)
self.operation = operation
def evaluate(self, context):
left_val = self.left.evaluate(context)
right_val = self.right.evaluate(context)
self.validator_cls()(left_val, right_val, instance_type=str)
return getattr(self, self.operation)(left_val, right_val)
def contains(self, left_str, right_str):
return right_str in left_str
def starts_with(self, left_str, right_str):
return left_str.startswith(right_str)
def ends_with(self, left_str, right_str):
return left_str.endswith(right_str)
def matches(self, left_str, right_str):
return re.match(right_str, left_str) is not None
# Path: bkflow_feel/parsers.py
class Time(Expression):
def __init__(self, value, timezone: TZInfo = None):
self.value = value
self.timezone = timezone
def evaluate(self, context):
parsed_dt = date_parse(self.value)
timezone = self.timezone.evaluate(context) if self.timezone is not None else None
return datetime.time(parsed_dt.hour, parsed_dt.minute, parsed_dt.second, tzinfo=timezone)
# Path: bkflow_feel/parsers.py
class TodayFunc(Expression):
def evaluate(self, context):
return datetime.date.today()
# Path: bkflow_feel/parsers.py
class ToString(CommonExpression):
def evaluate(self, context):
return str(self.value.evaluate(context))
# Path: bkflow_feel/parsers.py
class TZInfo(Expression):
def __init__(self, method, value):
self.method = method
self.value = value
def evaluate(self, context):
if self.method == "name":
return pytz.timezone(self.value)
elif self.method == "offset":
hours, minutes = map(int, self.value.split(":"))
sign = -1 if hours < 0 else 1
hours = abs(hours)
offset = hours * 60 + minutes
return pytz.FixedOffset(sign * offset)
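# A small sketch of the two TZInfo modes (assuming the class above is importable and pytz is
# installed): "name" resolves an IANA zone, while "offset" parses strings such as "+08:00" or
# "-05:30" into a fixed UTC offset, since pytz.FixedOffset takes minutes.
import datetime
import pytz
assert TZInfo("name", "Asia/Shanghai").evaluate({}).zone == "Asia/Shanghai"
assert TZInfo("offset", "+08:00").evaluate({}).utcoffset(None) == datetime.timedelta(minutes=480)
assert TZInfo("offset", "-05:30").evaluate({}).utcoffset(None) == datetime.timedelta(minutes=-330)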
# Path: bkflow_feel/parsers.py
class Variable(Expression):
def __init__(self, name):
self.name = name
def evaluate(self, context):
return context.get(self.name)
# Path: bkflow_feel/parsers.py
class IsDefinedFunc(CommonExpression):
def evaluate(self, context):
return self.value.evaluate(context) is not None
# Path: bkflow_feel/parsers.py
class GetOrElseFunc(BinaryOperator):
def evaluate(self, context):
left_val = self.left.evaluate(context)
right_val = self.right.evaluate(context)
return left_val if left_val is not None else right_val
# Path: bkflow_feel/transformer.py
from lark import Token, Transformer, v_args
from .data_models import RangeGroupOperator
from .parsers import (
AfterFunc,
And,
BeforeFunc,
Between,
Boolean,
Context,
ContextItem,
Date,
DateAndTime,
DayOfWeekFunc,
Expr,
FuncInvocation,
FunctionCall,
In,
IncludesFunc,
List,
ListEvery,
ListFilter,
ListItem,
ListOperator,
ListSome,
MonthOfYearFunc,
Not,
NotEqual,
NowFunc,
Null,
Number,
Or,
Pair,
RangeGroup,
SameTypeBinaryOperator,
String,
StringOperator,
Time,
TodayFunc,
ToString,
TZInfo,
Variable, IsDefinedFunc, GetOrElseFunc,
)
return RangeGroup(start, end, RangeGroupOperator.GTE, RangeGroupOperator.LT)
def variable(self, name_token):
return Variable(name_token.value)
def function_call(self, name, *args):
return FunctionCall(name, args)
def add(self, left, right):
return SameTypeBinaryOperator("add", left, right)
def sub(self, left, right):
return SameTypeBinaryOperator("subtract", left, right)
def mul(self, left, right):
return SameTypeBinaryOperator("multiply", left, right)
def div(self, left, right):
return SameTypeBinaryOperator("divide", left, right)
def pow(self, left, right):
return SameTypeBinaryOperator("power", left, right)
def eq(self, left, right):
return SameTypeBinaryOperator("equal", left, right)
def ne(self, left, right):
return NotEqual(left, right)
def lt(self, left, right):
return SameTypeBinaryOperator("less_than", left, right)
def gt(self, left, right):
return SameTypeBinaryOperator("greater_than", left, right)
def lte(self, left, right):
return SameTypeBinaryOperator("less_than_or_equal", left, right)
def gte(self, left, right):
return SameTypeBinaryOperator("greater_than_or_equal", left, right)
def and_(self, left, right):
return And(left, right)
def or_(self, left, right):
return Or(left, right)
def between(self, target, left, right):
return Between(target, left, right)
def in_(self, left, right):
return In(left, right)
def not_func(self, value):
return Not(value)
def date(self, value):
return Date(value)
def date_func(self, value):
return Expr(value)
def timezone(self, value):
return Expr(value)
def tz_offset(self, token):
return TZInfo("offset", token.value)
def tz_name(self, *tokens):
if len(tokens) == 2:
name = f"{tokens[0].value}/{tokens[1].value}"
else:
name = "UTC"
return TZInfo("name", name)
def time(self, value, timezone=None):
return Time(value, timezone)
def date_and_time(self, date, time):
return DateAndTime(date, time)
def time_func(self, value):
return Expr(value)
def date_and_time_func(self, value):
return Expr(value)
def now_func(self):
return NowFunc()
def today_func(self):
return TodayFunc()
def day_of_week_func(self, value):
return DayOfWeekFunc(value)
def month_of_year_func(self, value):
return MonthOfYearFunc(value)
def before_func(self, left, right):
return BeforeFunc(left, right)
def after_func(self, left, right):
return AfterFunc(left, right)
def includes_func(self, left, right):
return IncludesFunc(left, right)
def get_or_else_func(self, value, default):
return GetOrElseFunc(value, default)
def is_defined_func(self, value):
return IsDefinedFunc(value)
def context(self, *args):
return Context(args)
def context_item(self, expr, *keys):
return ContextItem(expr, keys)
| def pair(self, key_token, value): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: sivasurend/lyzr
# Path: lyzr/utils/rag_utils.py
def pdf_rag(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
query_engine_params: dict = None,
) -> BaseQueryEngine:
documents = read_pdf_as_documents(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
)
llm_params = {} if llm_params is None else llm_params
vector_store_params = (
{"vector_store_type": "LanceDBVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
query_engine_params = {} if query_engine_params is None else query_engine_params
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params,
documents=documents,
service_context=service_context,
)
return vector_store_index.as_query_engine(**query_engine_params, similarity_top_k=5)
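# A rough usage sketch for pdf_rag (an illustration, not from the repository): the directory,
# prompt, and llm_params contents here are assumptions -- llm_params is simply forwarded to
# LyzrLLMFactory.from_defaults, so valid keys depend on that factory and on the OpenAI
# credentials configured in the environment.
from lyzr.utils.rag_utils import pdf_rag

query_engine = pdf_rag(
    input_dir="./my_pdfs",  # hypothetical folder containing PDF files
    system_prompt="Answer strictly from the provided documents.",
    llm_params={"model": "gpt-3.5-turbo"},  # assumed to be a valid key for the LLM factory
)
print(query_engine.query("What is the refund policy?"))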
# Path: lyzr/utils/rag_utils.py
def txt_rag(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
query_engine_params: dict = None,
) -> BaseQueryEngine:
documents = read_txt_as_documents(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
)
llm_params = {} if llm_params is None else llm_params
vector_store_params = (
{"vector_store_type": "LanceDBVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
query_engine_params = {} if query_engine_params is None else query_engine_params
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params, documents=documents, service_context=service_context
)
return vector_store_index.as_query_engine(**query_engine_params, similarity_top_k=5)
# Path: lyzr/utils/rag_utils.py
def docx_rag(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
query_engine_params: dict = None,
) -> BaseQueryEngine:
documents = read_docx_as_documents(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
)
llm_params = {} if llm_params is None else llm_params
vector_store_params = (
{"vector_store_type": "LanceDBVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
query_engine_params = {} if query_engine_params is None else query_engine_params
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params, documents=documents, service_context=service_context
)
return vector_store_index.as_query_engine(**query_engine_params, similarity_top_k=5)
# Path: lyzr/utils/rag_utils.py
def webpage_rag(
url: str = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
query_engine_params: dict = None,
) -> BaseQueryEngine:
documents = read_webpage_as_documents(
url=url,
)
llm_params = {} if llm_params is None else llm_params
vector_store_params = (
{"vector_store_type": "LanceDBVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
query_engine_params = {} if query_engine_params is None else query_engine_params
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params, documents=documents, service_context=service_context
)
return vector_store_index.as_query_engine(**query_engine_params, similarity_top_k=5)
# Path: lyzr/utils/rag_utils.py
def website_rag(
url: str = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
query_engine_params: dict = None,
) -> BaseQueryEngine:
documents = read_website_as_documents(
url=url,
)
llm_params = {} if llm_params is None else llm_params
vector_store_params = (
{"vector_store_type": "LanceDBVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
query_engine_params = {} if query_engine_params is None else query_engine_params
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params, documents=documents, service_context=service_context
)
return vector_store_index.as_query_engine(**query_engine_params, similarity_top_k=5)
# Path: lyzr/utils/rag_utils.py
def youtube_rag(
urls: List[str] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
query_engine_params: dict = None,
) -> BaseQueryEngine:
documents = read_youtube_as_documents(
urls=urls,
)
llm_params = {} if llm_params is None else llm_params
vector_store_params = (
{"vector_store_type": "LanceDBVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
query_engine_params = {} if query_engine_params is None else query_engine_params
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params, documents=documents, service_context=service_context
)
return vector_store_index.as_query_engine(**query_engine_params, similarity_top_k=5)
# Path: lyzr/chatqa/qa_bot.py
from typing import Union, Optional, List
from llama_index import ServiceContext, VectorStoreIndex
from llama_index.embeddings.utils import EmbedType
from llama_index.indices.query.base import BaseQueryEngine
from lyzr.utils.rag_utils import (
pdf_rag,
txt_rag,
docx_rag,
webpage_rag,
website_rag,
youtube_rag,
)
)
@staticmethod
def docx_qa(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
query_engine_params: dict = None,
) -> BaseQueryEngine:
return docx_rag(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
query_engine_params=query_engine_params,
)
@staticmethod
def txt_qa(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
query_engine_params: dict = None,
) -> BaseQueryEngine:
return txt_rag(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
query_engine_params=query_engine_params,
)
@staticmethod
def webpage_qa(
url: Optional[str] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
query_engine_params: dict = None,
) -> BaseQueryEngine:
return webpage_rag(
url=url,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
query_engine_params=query_engine_params,
)
@staticmethod
def website_qa(
url: Optional[str] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
query_engine_params: dict = None,
) -> BaseQueryEngine:
return website_rag(
url=url,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
query_engine_params=query_engine_params,
)
@staticmethod
def youtube_qa(
urls: List[str] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
query_engine_params: dict = None,
| ) -> BaseQueryEngine: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: weiwei-cool/FanQieNovelDownloadOnWeb
# Path: tools/Fanqie.py
class FanqieNovel:
def __init__(self, url, mode):
def __str__(self):
def parse_url(self, url: str) -> str:
# Path: tools/DownloadNovel.py
class DownloadNovel(threading.Thread):
"""
Download a novel; a Fanqie object should be passed in.
"""
def __init__(self, fanqie: Fanqie):
# Fanqie novel object
self.fanqie: Fanqie = fanqie
# Flags used to stop the worker thread
self._stop_flag = False
self._stop_event = threading.Event()
# Custom WebDAV path
self.is_webdav = os.environ.get('IS_WEBDAV')
if self.is_webdav:
self.webdav_username = os.environ.get('WEBDAV_USERNAME')
self.webdav_pwd = os.environ.get('WEBDAV_PWD')
self.webdav_url = os.environ.get('WEBDAV_URL')
self.webdav = Client(base_url=self.webdav_url,
auth=(self.webdav_username, self.webdav_pwd))
tools.logger.info(f'已成功加载webdav服务器({self.webdav_url})')
# Custom save path
self.custom_path = os.environ.get('CUSTOM_PATH')
if not self.custom_path:
self.custom_path = './books'
os.makedirs(self.custom_path, exist_ok=True)
tools.logger.warning(f'您未设置自定义保存路径,将使用默认路径: {self.custom_path}')
super().__init__()
def run(self) -> None:
# Fetch the novel object from the database
history_entry = History.objects.get(obid=self.fanqie.obid)
tools.logger.info(f'开始下载小说: \n{self.fanqie.__str__()}')
# Determine the download mode
if self.fanqie.mode == 'txt':
tools.logger.info(f'正在以txt模式下载小说')
content = f"""{self.fanqie.title}
{self.fanqie.intro}
"""
# Collect all chapter links
start_index = 0
file_name = self.fanqie.title + ".txt"
file_path = os.path.join(self.custom_path, file_name)
# Get the number of chapters
chapters = self.fanqie.soup.find_all("div", class_="chapter-item")
chapter_num = len(chapters)
chapter_num_now = 0
try:
# Iterate over every chapter link
for chapter in chapters[start_index:]:
if self._stop_event.is_set():
break
time.sleep(0.25)
if self._stop_event.is_set():
break
# Get the chapter title
chapter_title = chapter.find("a").get_text()
# Get the chapter URL
chapter_url = urljoin(self.fanqie.url, chapter.find("a")["href"])
# Get the chapter id
chapter_id = re.search(r"/(\d+)", chapter_url).group(1)
# Build the API URL
api_url = (f"https://novel.snssdk.com/api/novel/book/reader/full/v1/?device_platform=android&"
f"parent_enterfrom=novel_channel_search.tab.&aid=2329&platform_id=1&group_id="
f"{chapter_id}&item_id={chapter_id}")
# Try to fetch the chapter content
chapter_content = None
retry_count = 1
while retry_count < 4:  # maximum number of retries
if self._stop_event.is_set():
break
def get_api():
# Fetch the API response
api_response_ = requests.get(api_url, headers=self.fanqie.headers)
# Parse the API response as JSON
api_data_ = api_response_.json()
return api_data_
api_data = None
retry_get_api = 1
while retry_get_api < 4:
try:
api_data = get_api()
except Exception as e:
tools.logger.error(f'错误!{e}')
else:
break
retry_get_api += 1
if "data" in api_data and "content" in api_data["data"]:
chapter_content = api_data["data"]["content"]
break  # chapter content fetched successfully, exit the retry loop
else:
if retry_count == 1:
tools.logger.warning(f'{chapter_title} 获取失败,正在尝试重试...')
tools.logger.warning(f'第 ({retry_count}/3) 次重试获取章节内容')
retry_count += 1  # otherwise retry
if retry_count == 4:
tools.logger.error(f'无法获取章节内容: {chapter_title},跳过。')
continue  # too many retries, skip this chapter
# Extract the text inside the <article> tag
chapter_text = re.search(r"<article>([\s\S]*?)</article>", chapter_content).group(1)
# Replace <p> tags with newlines
chapter_text = re.sub(r"<p>", "\n", chapter_text)
# Strip the remaining HTML tags
chapter_text = re.sub(r"</?\w+>", "", chapter_text)
chapter_text = tools.fix_publisher(chapter_text)
# Append the chapter title and text to the novel content string
content += f"\n\n\n{chapter_title}\n{chapter_text}"
chapter_num_now += 1
history_entry.percent = round(
(chapter_num_now / chapter_num) * 100, 2)
history_entry.save()
# Log progress information
tools.logger.info(f'已获取 {chapter_title}, 进度:{history_entry.percent}%')
# Encode the novel content string into bytes
data = content.encode('utf-8', errors='ignore')
# Save the file
with open(file_path, "wb") as f:
f.write(data)
file_path = os.path.join(self.custom_path, file_name)
file_path = Path(file_path)
if self.is_webdav:
self.webdav.upload_file(from_path=file_path,
to_path=os.path.join('/public', file_name),
overwrite=True)
tools.logger.info(f'《{self.fanqie.title}》已成功上传webdav服务器')
# Log completion
tools.logger.info(f'已保存{self.fanqie.title}.txt至本地')
except BaseException as e:
# Catch all exceptions and save the file promptly
tools.logger.error(f'发生异常: \n{e}')
tools.logger.info('正在尝试保存文件')
# Encode the novel content string into bytes
data = content.encode('utf-8', errors='ignore')
# Save the file
file_path = os.path.join(self.custom_path, file_name)
with open(file_path, "wb") as f:
f.write(data)
tools.logger.info('文件已保存!')
return
elif self.fanqie.mode == 'epub':
tools.logger.info(f'正在以epub模式下载小说')
# Create the epub e-book
book = epub.EpubBook()
# Download the cover
response = requests.get(self.fanqie.img_url)
# Get the image content
img_data = response.content
# Save the image to a local file
with open("cover.jpg", "wb") as f:
f.write(img_data)
# Create a cover image
book.set_cover("image.jpg", open('cover.jpg', 'rb').read())
# Delete the cover file
os.remove('cover.jpg')
# Set the book metadata
book.set_title(self.fanqie.title)
book.set_language('zh-CN')
book.add_author(self.fanqie.author_name)
book.add_metadata('DC', 'description', self.fanqie.intro)
# Get the volume labels
page_directory_content = self.fanqie.soup.find('div', class_='page-directory-content')
nested_divs = page_directory_content.find_all('div', recursive=False)
# intro chapter
intro_e = epub.EpubHtml(title='Introduction', file_name='intro.xhtml', lang='hr')
intro_e.content = (f'<html><head></head><body>'
f'<img src="image.jpg" alt="Cover Image"/>'
f'<h1>{self.fanqie.title}</h1>'
f'<p>{self.fanqie.intro}</p>'
f'</body></html>')
book.add_item(intro_e)
# Create the index (table of contents)
book.toc = (epub.Link('intro.xhtml', '简介', 'intro'),)
book.spine = ['nav', intro_e]
# Get the number of chapters
chapters = self.fanqie.soup.find_all("div", class_="chapter-item")
chapter_num = len(chapters)
chapter_num_now = 0
try:
volume_id = 0
# Iterate over every volume
for div in nested_divs:
if self._stop_event.is_set():
break
first_chapter = None
volume_id += 1
volume_div = div.find('div', class_='volume')
# 提取 "卷名" 文本
volume_title = volume_div.text
tools.logger.info(f'正在获取{volume_title}')
chapters = div.find_all("div", class_="chapter-item")
start_index = None
for i, chapter in enumerate(chapters):
if self._stop_event.is_set():
break
chapter_url_tmp = urljoin(self.fanqie.url, chapter.find("a")["href"])
chapter_id_tmp = re.search(r"/(\d+)", chapter_url_tmp).group(1)
if chapter_id_tmp == '0':  # epub mode does not support a start chapter
start_index = i
# Define the table-of-contents index
toc_index = ()
chapter_id_name = 0
# Iterate over every chapter link
for chapter in chapters[start_index:]:
chapter_id_name += 1
if self._stop_event.is_set():
break
time.sleep(0.25)
if self._stop_event.is_set():
break
# Get the chapter title
chapter_title = chapter.find("a").get_text()
# Get the chapter URL
chapter_url = urljoin(self.fanqie.url, chapter.find("a")["href"])
# Get the chapter id
chapter_id = re.search(r"/(\d+)", chapter_url).group(1)
# Build the API URL
api_url = (f"https://novel.snssdk.com/api/novel/book/reader/full/v1/?device_platform=android&"
f"parent_enterfrom=novel_channel_search.tab.&aid=2329&platform_id=1&group_id="
f"{chapter_id}&item_id={chapter_id}")
# Try to fetch the chapter content
chapter_content = None
retry_count = 1
while retry_count < 4:  # maximum number of retries
if self._stop_event.is_set():
break
def get_api():
# Fetch the API response
api_response_ = requests.get(api_url, headers=self.fanqie.headers)
# Parse the API response as JSON
api_data_ = api_response_.json()
return api_data_
api_data = None
retry_get_api = 1
while retry_get_api < 4:
try:
api_data = get_api()
except Exception as e:
tools.logger.error(f'发生异常: \n{e}')
else:
break
retry_get_api += 1
if "data" in api_data and "content" in api_data["data"]:
chapter_content = api_data["data"]["content"]
break  # chapter content fetched successfully, exit the retry loop
else:
if retry_count == 1:
tools.logger.warning(f'{chapter_title} 获取失败,正在尝试重试...')
tools.logger.warning(f'第 ({retry_count}/3) 次重试获取章节内容')
retry_count += 1  # otherwise retry
if retry_count == 4:
tools.logger.error(f'无法获取章节内容: {chapter_title},跳过。')
continue  # too many retries, skip this chapter
# Extract the text inside the <article> tag
chapter_text = re.search(r"<article>([\s\S]*?)</article>", chapter_content).group(1)
# Add the chapter title and text to the book content
text = epub.EpubHtml(title=chapter_title,
file_name=f'chapter_{volume_id}_{chapter_id_name}.xhtml')
text.content = (f'<h2>{chapter_title}</h2>'
f'{chapter_text}')
toc_index = toc_index + (text,)
book.spine.append(text)
# Find the first chapter
if chapter_id_name == 1:
first_chapter = f'chapter_{volume_id}_{chapter_id_name}.xhtml'
# Add to the epub
book.add_item(text)
chapter_num_now += 1
history_entry.percent = round(
(chapter_num_now / chapter_num) * 100, 2)
history_entry.save()
# Log progress information
tools.logger.info(f'已获取 {chapter_title}, 进度:{history_entry.percent}%')
# Add to the book index
book.toc = book.toc + ((epub.Section(volume_title, href=first_chapter),
toc_index,),)
# Catch exceptions
except BaseException as e:
# Catch all exceptions
tools.logger.error(f'发生异常: \n{e}')
return
# Add navigation files
book.add_item(epub.EpubNcx())
book.add_item(epub.EpubNav())
# Build the file name and file path
file_name = self.fanqie.title + ".epub"
file_path = os.path.join(self.custom_path, file_name)
# Write the e-book
epub.write_epub(file_path, book, {})
# Upload via WebDAV
file_path = Path(file_path)
if self.is_webdav:
self.webdav.upload_file(from_path=file_path,
to_path=os.path.join('/public', file_name),
overwrite=True)
tools.logger.info(f'《{self.fanqie.title}》已成功上传webdav服务器')
tools.logger.info(f'已保存{self.fanqie.title}.epub至本地')
# Method to stop the worker thread
def stop(self):
self._stop_event.set()
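# A minimal sketch of the cooperative-stop pattern DownloadNovel relies on: the worker checks a
# threading.Event between chapters and stop() only sets the flag, so the thread exits at the next
# checkpoint rather than being killed. (The Fanqie object and the Django model are omitted here;
# this only illustrates the threading idiom.)
import threading
import time

class _Worker(threading.Thread):
    def __init__(self):
        super().__init__()
        self._stop_event = threading.Event()
    def run(self):
        while not self._stop_event.is_set():
            time.sleep(0.1)  # stand-in for "download one chapter"
    def stop(self):
        self._stop_event.set()

w = _Worker()
w.start()
w.stop()
w.join()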
# Path: Api/models.py
class History(models.Model):
file_name = models.CharField(max_length=255)
percent = models.FloatField(default=0)
book_id = models.CharField(max_length=255)
obid = models.CharField(max_length=255)
objects = models.Manager()
# Path: Api/views.py
import os
import tools
import json
from django.http import JsonResponse
from tools import Fanqie, DownloadNovel
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from .models import History
# Collection of novels currently being downloaded
download_object = []
@csrf_exempt  # to allow cross-origin requests, optional
@require_POST  # ensure only POST requests are accepted, optional
@tools.logger.catch  # capture detailed error information
def download(request):  # download endpoint
global download_object
if request.method == 'POST':
try:
# Get the url data
tools.logger.info('正在获取url数据……')  # log it
data = json.loads(request.body.decode('utf-8'))
urls = data.get('urls', [])
# Preliminary de-duplication
urls = list(set(urls))
tools.logger.info(f'已获取urls为:{urls}')
# Get the download format
format_ = data.get('format', 'txt')
tools.logger.info(f'下载方式为{format_}')
# Get the book information
books = []
[books.append(Fanqie.FanqieNovel(url, format_)) for url in urls]
[tools.logger.info(f'下载书籍:\n{book.__str__()}') for book in books]
# Track books that were submitted more than once
return_url = []
# Start the download threads
for i in books:
try:
history_ = History.objects.get(obid=i.obid)
if history_.obid == i.obid:
tools.logger.warning(f'《{i.title}》重复提交!')
return_url.append(i.url)
continue
except Exception as e:
tools.logger.info(f'《{i.title}》未重复, 已返回:{e}')
b = History(book_id=i.book_id, obid=i.obid, file_name=f'{i.title}.{format_}', percent=0)
b.save()
d = DownloadNovel.DownloadNovel(i)
download_object.append({'obid': i.obid, 'obj': d, 'book': i})
d.start()
tools.logger.info(f'《{i.title}》已开始下载')
# Return the accepted and the duplicated entries
response_data = {'message': 'Download request received', 'urls': urls, 'return': return_url}
return JsonResponse(response_data, status=200)
except Exception as e:
tools.logger.error(f'发生异常: \n{e}')
return JsonResponse({'error': str(e)}, status=500)
return JsonResponse({'error': 'Invalid request method'}, status=405)
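# A rough client-side sketch of the payload this view expects: a JSON body with a list of novel
# "urls" and a "format" of "txt" or "epub". The route path and the example URL below are
# assumptions -- the project's urls.py is not shown in this excerpt.
import requests

resp = requests.post(
    "http://localhost:8000/api/download",  # hypothetical route, adjust to the real URLconf
    json={"urls": ["https://fanqienovel.com/page/1234567890"], "format": "epub"},
)
print(resp.json())  # e.g. {'message': 'Download request received', 'urls': [...], 'return': [...]}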
def download_del(_request, pk):  # remove a novel from the task list
global download_object
try:
history_ = History.objects.filter(obid=pk)
for j in history_:
for i in download_object:
if i['obid'] == pk:
i['obj'].stop()
tools.logger.info(f'《{i["book"].title}》已从下载列表中移除')
j.delete()
response_data = {'status': 'ok'}
return JsonResponse(response_data, status=200)
except Exception as e:
tools.logger.error(f'错误!{e}')
return JsonResponse({'status': 'error', 'error': str(e)}, status=400)
@csrf_exempt  # to allow cross-origin requests, optional
def history(_request):  # list all novels currently in the task queue
records = History.objects.all()
response_data = {'history': []}
for record in records:
tools.logger.info(f'查询正在任务中的小说:'
f'{record.file_name}(obid: {record.obid}) 已下载 {record.percent}%')
response_data['history'].append({'book_id': record.book_id,
'obid': record.obid,
'file_name': record.file_name,
'percent': record.percent})
response_data['history'] = response_data['history'][::-1]
return JsonResponse(response_data, status=200)
def history_id(_request, pk):  # query a novel's download data by its obid
history_entry = History.objects.get(obid=pk)
tools.logger.info(f'查询正在任务中的小说:'
| f'{history_entry.file_name}(obid: {history_entry.obid}) 已下载 {history_entry.percent}%') |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: StoneMoe/ASub
# Path: app/core/models/project.py
class Project:
path: str  # project directory (relative path)
name: str  # project name
def __init__(self, name: str, existed_err=False):
self.name = name
self.path = os.path.join(Core.PROJ_DIR, name)
try:
os.makedirs(self.path)
info(f'已创建目录 {self.path}')
except OSError as e: # directory existed
if existed_err:
raise e
def _prepare(self):
info(f'正在预处理 "{self.name}" 的音频')
tmp_path = os.path.join(self.path, 'source.wav')
tmp_file = test_files(tmp_path)
src_file = test_files(
os.path.join(self.path, 'source.mp4'),
os.path.join(self.path, f'{self.name}.mp4'),
os.path.join(self.path, f'{self.name}.mp3')
)
if tmp_file:
info(f'找到了临时文件 "{tmp_file}",跳过预处理')
elif src_file:
info(f'找到了 "{src_file}",开始预处理')
if check_ffmpeg() != FFMpegStatus.READY:
raise EnvironmentError('FFMpeg尚未安装')
proc: Popen[bytes] = ffmpeg.input(src_file) \
.output(tmp_path, format='wav', acodec='pcm_s16le', ac=1, ar=16000) \
.overwrite_output() \
.run_async(pipe_stdout=True, pipe_stderr=True)
out, err = proc.communicate()
return_code = proc.wait()
if return_code != 0:
raise ChildProcessError('无法提取音频')
info('预处理成功')
else:
raise FileNotFoundError(f'请将同名 mp4 文件放置在 {self.path}')
def delete(self):
"""Delete project folder"""
shutil.rmtree(self.path)
def transcribe(self, opt: TranscribeOpt):
"""
transcribe wav audio to SRT
:return: transcribe result file path
"""
self._prepare()
target_file = opt.make_srt_filepath(name=self.name, path=self.path)
if os.path.isfile(target_file):
info(f'文件 "{target_file}" 已存在,跳过听写')
return target_file
info(f'使用 {opt}')
match opt.backend:
# case Engine.CPP_CPU:
# ext = ''
# if opt.compress_ratio_threshold:
# ext += f' -et {opt.compress_ratio_threshold} '
# if opt.prompt_name:
# ext += f' --prompt "{DB.PROMPTS[opt.prompt_name]}" '
# if opt.speedup:
# ext += f' -su '
# if opt.ss and opt.t:
# ss = opt.ss * 1000
# t = opt.t * 1000
# if opt.speedup:
# ss /= 2
# t /= 2
# ext += f' -ot {ss} -d {t} '
# cmd = f".\\whisper\\main.exe -m data/whisper_model/ggml-large-v2.bin " \
# f"-pp -osrt -l {opt.lang} -t 8 {ext} -f {self.path}/source.wav -of {target_file.rstrip('.srt')}"
# print(f'运行: {cmd}')
# proc = subprocess.Popen(cmd, shell=True, cwd=os.getcwd(), stdout=subprocess.PIPE)
# for line in proc.stdout:
# print(line.decode(Core.CODEC).rstrip())
case 'py-gpu' | 'py-cpu':
info('正在加载模型')
import whisper
import torch
model = whisper.load_model(opt.model, download_root='whisper_model', device='cpu')
if opt.quantize:
info('正在量化模型')
model = torch.quantization.quantize_dynamic(
model, {torch.nn.Linear}, dtype=torch.qint8
)
if opt.backend == 'py-gpu':
info('正在加载至显卡')
model.to('cuda')
result = model.transcribe(
audio=f'{self.path}/source.wav',
language=opt.lang,
compression_ratio_threshold=opt.compress_ratio_threshold,
initial_prompt=Consts.PROMPTS[opt.prompt_name],
verbose=True,
)
del model
torch.cuda.empty_cache()
segments = result['segments']
srt = SRTFile(source=segments)
srt.dump(target_file)
case _:
raise NotImplementedError(f'{opt.backend} 引擎尚未支持')
info('听写完成')
def translate(self, opt: TranscribeOpt, vocab=None):
srt = SRTFile(source=opt.make_srt_filepath(self.name, self.path))
srt.translate(vocab=vocab)
@classmethod
def list(cls) -> List[str]:
"""list all projects"""
names = os.listdir(Core.PROJ_DIR)
directories = [name for name in names if os.path.isdir(os.path.join(Core.PROJ_DIR, name))]
directories = sort_titles(directories)
return directories
@classmethod
def bulk_create(cls, targets: List[tuple]):
info(f'正在创建 {len(targets)} 个工程')
for proj_name, filepath in targets:
try:
proj = Project(proj_name, existed_err=True)
except OSError:
info(f'"{proj_name}" 已存在,不再创建')
continue
if filepath:
dst_filepath = os.path.join(proj.path, os.path.basename(filepath))
info(f'正在将 {filepath} 复制到 {dst_filepath}')
shutil.copy(filepath, dst_filepath)
info('复制完毕')
# Path: app/core/models/project.py
class TranscribeOpt:
"""
:param backend: whisper implementation
:param model: whisper model name
:param quantize: whisper model quantization switch
:param ss: transcribe start second
:param t: transcribe time duration(second)
:param compress_ratio_threshold: 2.4 ~ 3 is recommended, segments higher than this will be re-inferenced
:param speedup: double speed, decrease quality
:param prompt_name: name
"""
backend: str
model: str
quantize: bool
lang: Optional[str]
ss: int # TODO: implement in whisper.py mode
t: int # TODO: implement in whisper.py mode
compress_ratio_threshold: float
speedup: bool # TODO: implement in whisper.py mode
prompt_name: str
def make_srt_filepath(self, name: str, path: str) -> str:
return f'{path}/' \
f'{name}' \
f'[{self.backend}]' \
f'[{self.model}]' \
f'[q{int(self.quantize)}]' \
f'[L{self.lang or "auto"}]' \
f'[t{"FULL" if not (self.ss and self.t) else f"{self.ss}-{self.ss + self.t}"}]' \
f'[e{self.compress_ratio_threshold}]' \
f'[s{int(self.speedup)}]' \
f'[p{self.prompt_name or "-"}]' \
f'.srt'
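# For concreteness, one worked example of the naming scheme above (spelled out by hand, since the
# decorator that makes TranscribeOpt constructible, e.g. @dataclass, is not shown in this excerpt):
# with name="demo", path="projects/demo", backend="py-gpu", model="large-v2", quantize=False,
# lang="en", ss=0, t=0, compress_ratio_threshold=2.4, speedup=False, prompt_name="",
# make_srt_filepath returns
#   projects/demo/demo[py-gpu][large-v2][q0][Len][tFULL][e2.4][s0][p-].srt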
# Path: app/core/utils/generic.py
def info(text):
print(f"ℹ️{text}")
# Path: app/ui/components/label.py
class AutoLabel(QLabel):
def __init__(self, text, parent=None, elide_mode=None):
super().__init__(text, parent)
self._raw_text = text
self._elide_mode = elide_mode if elide_mode is not None else Qt.ElideMiddle
self._eliding = False
def _get_elided_text(self):
return self.fontMetrics().elidedText(self._raw_text, self._elide_mode, self.width())
def resizeEvent(self, event: QtGui.QResizeEvent):
super().resizeEvent(event)
if self._eliding:
return
self._eliding = True
super().setText(self._get_elided_text())
self._eliding = False
def setText(self, text):
self._raw_text = text
super().setText(self._get_elided_text())
# Path: app/ui/config.py
class Engine(Enum):
class TranscribeModel(Enum):
class UILang(Enum):
class TranscribeLang(Enum):
class Config(QConfig):
PY_CPU = "py-cpu"
PY_GPU = "py-gpu"
CPP_CPU = "cpp-cpu"
LARGE_V2 = "large-v2"
MEDIUM = "medium"
SMALL = "small"
BASE = "base"
TINY = "tiny"
CHINESE_SIMPLIFIED = "chs"
CHINESE_TRADITIONAL = "cht"
ENGLISH = "en"
AUTO = "auto"
AUTO = None
def options(cls):
# Path: app/ui/const.py
CONTAINER_MARGINS = (32, 64, 32, 32)
# Path: app/ui/utils.py
def run_in_thread(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if kwargs:  # pass kwargs through even when no positional args are given
t = threading.Thread(target=func, args=args, kwargs=kwargs)
elif args:
t = threading.Thread(target=func, args=args)
else:
t = threading.Thread(target=func)
t.daemon = True
t.start()
return t
return wrapper
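# Minimal usage sketch of the decorator: the wrapped call returns the started daemon thread
# immediately instead of blocking on the function.
@run_in_thread
def _ping(msg):
    print(msg)

t = _ping("hello from a worker thread")  # returns the threading.Thread
t.join()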
# Path: app/ui/utils.py
def clear_layout(layout):
while layout.count():
child = layout.takeAt(0)
if child.widget():
child.widget().deleteLater()
elif child.layout():
clear_layout(child.layout())
# Path: app/ui/utils.py
def open_folder(folder_path):
"""Open specific folder in file explorer application"""
if os.name == 'nt': # Windows
os.startfile(folder_path)
elif os.name == 'posix': # Linux, macOS, etc.
subprocess.Popen(['xdg-open', folder_path])
else:
raise OSError(f'Unsupported platform: {os.name}')
# Path: app/ui/windows/subtitle_window.py
class SubtitleWindow(QDialog, FramelessWindow):
def __init__(self, filepath: str, parent=None):
super().__init__(parent)
self.srt_file = SRTFile(filepath)
self.hBoxLayout = QVBoxLayout(self)
self.tableView = TableWidget(self)
self.saveButton = QPushButton("Save", self)
self.saveButton.clicked.connect(self._save_subtitle_file)
self.hBoxLayout.setContentsMargins(*CONTAINER_MARGINS)
self.hBoxLayout.addWidget(self.tableView)
self.hBoxLayout.addWidget(self.saveButton)
self.init_window()
self._load_subtitle_file()
def _load_subtitle_file(self):
self.tableView.setWordWrap(False)
self.tableView.setRowCount(len(self.srt_file.entries))
self.tableView.setColumnCount(3)
for i, entry in enumerate(self.srt_file.entries):
self.tableView.setItem(i, 0, QTableWidgetItem(entry.index))
self.tableView.setItem(i, 1, QTableWidgetItem(entry.time))
self.tableView.setItem(i, 2, QTableWidgetItem(entry.text))
self.tableView.verticalHeader().hide()
self.tableView.setHorizontalHeaderLabels(['Index', 'Time', 'Text'])
self.tableView.resizeColumnsToContents()
def _save_subtitle_file(self):
for i in range(self.tableView.rowCount()):
self.srt_file.entries[i].index = self.tableView.item(i, 0).text()
self.srt_file.entries[i].time = self.tableView.item(i, 1).text()
self.srt_file.entries[i].text = self.tableView.item(i, 2).text()
self.srt_file.dump()
def init_window(self):
self.setWindowTitle(f'编辑 {self.srt_file.filepath}')
self.resize(625, 700)
self._set_qss()
def _set_qss(self):
color = 'dark' if isDarkTheme() else 'light'
with open(res_dir(f'app/ui/resource/qss/{color}/style.qss'), encoding='utf-8') as f:
self.setStyleSheet(f.read())
# Path: app/ui/views/project_view.py
import os
from typing import Optional
from PyQt5.QtCore import pyqtSignal, QPoint, Qt
from PyQt5.QtWidgets import QFrame, QVBoxLayout, QHBoxLayout, QAction
from qfluentwidgets import PushButton, FluentIcon, RoundMenu, ToolButton, MessageBox, StateToolTip
from app.core.models.project import Project, TranscribeOpt
from app.core.utils.generic import info
from app.ui.components.label import AutoLabel
from app.ui.config import cfg
from app.ui.const import CONTAINER_MARGINS
from app.ui.utils import run_in_thread, clear_layout, open_folder
from app.ui.windows.subtitle_window import SubtitleWindow
class ProjectView(QFrame):
sig_subtitle_list_loaded = pyqtSignal(list)
sig_transcribe_running = pyqtSignal(bool)
def __init__(self, parent=None):
super().__init__(parent=parent)
self.setObjectName('proj-view')
self.project: Optional[Project] = None
self.state_tooltip = None
self.layout = QVBoxLayout(self)
self.layout_title = QHBoxLayout(self)
self.layout_subtitles = QVBoxLayout(self)
self.label_title = AutoLabel('<Loading>', self, Qt.ElideMiddle)
self.label_title.setObjectName('ViewTitle')
self.btn_manage = ToolButton(FluentIcon.MORE, self)
self.btn_manage.clicked.connect(
lambda: self._on_btn_manage_clicked(
self.btn_manage.mapToGlobal(QPoint()) + QPoint(self.btn_manage.width() + 5, 10)
)
)
self.btn_transcribe = PushButton('开始听写', self, FluentIcon.SEND_FILL)
self.btn_transcribe.clicked.connect(self._run_transcribe)
self._init_signal()
self._init_layout()
def set_project(self, project: Project):
self.project = project
self.label_title.setText(self.project.name)
self.label_title.setToolTip(self.project.name)
self._reload_subtitle_list()
def _init_layout(self):
self.layout_title.addWidget(self.label_title)
self.layout_title.addWidget(self.btn_manage)
self.layout.addLayout(self.layout_title)
self.layout.addLayout(self.layout_subtitles)
self.layout.addStretch(1)
self.layout.addWidget(self.btn_transcribe)
self.layout.setContentsMargins(*CONTAINER_MARGINS)
def _init_signal(self):
self.sig_subtitle_list_loaded.connect(self._on_subtitle_list_loaded)
self.sig_transcribe_running.connect(self._on_transcribe_running_changed)
def _on_transcribe_running_changed(self, running: bool):
if self.state_tooltip is None:
self.state_tooltip = StateToolTip('正在听写中', '请耐心等待', self)
self.state_tooltip.closeButton.hide()
if running:
self.btn_transcribe.setDisabled(True)
self.state_tooltip.move(10, 10)
self.state_tooltip.show()
else:
self.btn_transcribe.setDisabled(False)
self.state_tooltip.setState(True)
self.state_tooltip.setTitle('听写完成!')
self.state_tooltip.setContent('')
self.state_tooltip = None
def _on_subtitle_list_loaded(self, filenames: list):
clear_layout(self.layout_subtitles)
for filename in filenames:
layout = QHBoxLayout(self)
label = AutoLabel(filename, self, Qt.ElideLeft)
label.setToolTip(filename)
btn_translate = ToolButton(FluentIcon.EDIT, self)
btn_translate.setToolTip('编辑')
btn_translate.clicked.connect(self._on_subtitle_edit_clicked(filename))
btn_delete = ToolButton(FluentIcon.DELETE, self)
btn_delete.setToolTip('删除')
btn_delete.clicked.connect(self._on_subtitle_delete_clicked(filename))
layout.addWidget(label)
layout.addWidget(btn_translate)
layout.addWidget(btn_delete)
self.layout_subtitles.addLayout(layout)
def _reload_subtitle_list(self):
self.sig_subtitle_list_loaded.emit(
[
filename
for filename in os.listdir(self.project.path)
if filename.endswith('.srt') or filename.endswith('.ass')
]
)
def _on_subtitle_edit_clicked(self, filename):
def f():
target_file = os.path.join(self.project.path, filename)
edit_win = SubtitleWindow(target_file)
edit_win.exec_()
return f
def _on_subtitle_delete_clicked(self, filename):
def f():
target_file = os.path.join(self.project.path, filename)
if MessageBox('删除确认', f'真的要删除 {target_file} 吗?', self.window()).exec():
| os.remove(target_file) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: NicolasZucchet/Online-learning-LR-dependencies
# Path: online_lru/rec.py
class LRU(nn.Module):
"""
LRU layer that updates internal elegibility traces to allow online learning.
"""
d_hidden: int # hidden state dimension
d_model: int # input and output dimensions
seq_length: int # time sequence length
gamma_norm: bool = True # use gamma normalization
exp_param: bool = True # exponential parametrization for lambda
r_min: float = 0.0 # smallest eigenvalue norm
r_max: float = 1.0 # largest eigenvalue norm
max_phase: float = 6.28 # max phase eigenvalue
training_mode: str = "bptt" # which learning algorithm that will be used
training: bool = False # TODO remove, for debugging purposes
def get_diag_lambda(self, nu=None, theta=None):
"""
Transform parameters nu and theta into the diagonal of the recurrent
Lambda matrix.
Args:
nu, theta array[N]: when set to their default values, None, the
parameters will take the values of the Module.
NOTE: these arguments are added in order to backpropagate through this
transformation.
"""
if nu is None:
nu = self.nu
if theta is None:
theta = self.theta
if self.exp_param:
theta = jnp.exp(theta)
nu = jnp.exp(nu)
return jnp.exp(-nu + 1j * theta)
def get_diag_gamma(self):
"""
Transform parameters gamma_log into the diagonal terms of the modulation matrix gamma.
"""
if self.gamma_norm:
return jnp.exp(self.gamma_log)
else:
return jnp.ones((self.d_hidden,))
def get_B(self):
"""
Get input to hidden matrix B.
"""
return self.B_re + 1j * self.B_im
def get_B_norm(self):
"""
Get modulated input to hidden matrix gamma B.
"""
return self.get_B() * jnp.expand_dims(self.get_diag_gamma(), axis=-1)
def to_output(self, inputs, hidden_states):
"""
Compute output given inputs and hidden states.
Args:
inputs array[T, H].
hidden_states array[T, N].
"""
C = self.C_re + 1j * self.C_im
D = self.D
y = jax.vmap(lambda x, u: (C @ x).real + D * u)(hidden_states, inputs)
return y
def get_hidden_states(self, inputs):
"""
Compute the hidden states corresponding to inputs
Return:
hidden_states array[T, N]
"""
# Materializing the diagonal of Lambda and projections
diag_lambda = self.get_diag_lambda()
B_norm = self.get_B_norm()
# Running the LRU + output projection
# For details on parallel scan, check discussion in Smith et al (2022).
Lambda_elements = jnp.repeat(diag_lambda[None, ...], inputs.shape[0], axis=0)
Bu_elements = jax.vmap(lambda u: B_norm @ u)(inputs)
elements = (Lambda_elements, Bu_elements)
if self.training_mode == "bptt":
_, hidden_states = jax.lax.associative_scan(binary_operator_diag, elements)
else:
_, hidden_states = jax.lax.associative_scan(binary_operator_diag_spatial, elements)
return hidden_states
def setup(self):
# Check that desired approximation is handled
if self.training_mode == "online_snap1":
raise NotImplementedError("SnAp-1 not implemented for LRU")
assert self.training_mode in [
"bptt",
"online_full",
"online_full_rec",
"online_full_rec_simpleB",
"online_snap1", # same as online_full
"online_spatial",
"online_1truncated",
"online_reservoir",
]
self.online = "online" in self.training_mode # whether we compute the gradient online
if self.online:
self.approximation_type = self.training_mode[7:]
# NOTE if exp_param is true, self.theta and self.nu actually represent the log of nu and
# theta lambda is initialized uniformly in complex plane
self.theta = self.param(
"theta",
partial(theta_init, max_phase=self.max_phase, log=self.exp_param),
(self.d_hidden,),
) # phase of lambda in [0, max_phase]
self.nu = self.param(
"nu",
partial(nu_init, r_min=self.r_min, r_max=self.r_max, log=self.exp_param),
(self.d_hidden,),
) # norm of lambda in [r_min, r_max]
if self.gamma_norm:
self.gamma_log = self.param(
"gamma_log", partial(gamma_log_init, log=self.exp_param), (self.nu, self.theta)
)
# Glorot initialized Input/Output projection matrices
self.B_re = self.param(
"B_re",
partial(matrix_init, normalization=jnp.sqrt(2 * self.d_model)),
(self.d_hidden, self.d_model),
)
self.B_im = self.param(
"B_im",
partial(matrix_init, normalization=jnp.sqrt(2 * self.d_model)),
(self.d_hidden, self.d_model),
)
self.C_re = self.param(
"C_re",
partial(matrix_init, normalization=jnp.sqrt(self.d_hidden)),
(self.d_model, self.d_hidden),
)
self.C_im = self.param(
"C_im",
partial(matrix_init, normalization=jnp.sqrt(self.d_hidden)),
(self.d_model, self.d_hidden),
)
self.D = self.param("D", matrix_init, (self.d_model,))
# Internal variables of the model needed for updating the gradient
if self.online and self.approximation_type not in ["spatial", "reservoir"]:
self.pert_hidden_states = self.variable(
"perturbations",
"hidden_states",
partial(jnp.zeros, dtype=jnp.complex64),
(self.seq_length, self.d_hidden),
)
self.traces_gamma = self.variable(
"traces", "gamma", jnp.zeros, (self.seq_length, self.d_hidden)
)
self.traces_lambda = self.variable(
"traces", "lambda", jnp.zeros, (self.seq_length, self.d_hidden)
)
if self.approximation_type in ["full", "snap1", "full_rec_simpleB"]:
self.traces_B = self.variable(
"traces", "B", jnp.zeros, (self.seq_length, self.d_hidden, self.d_model)
)
def __call__(self, inputs):
"""
Forward pass. If in training mode, additionally computes the eligibility traces that
will be needed to compute the gradient estimate in backward.
"""
# Compute hidden states and outputs
hidden_states = self.get_hidden_states(inputs)
if self.online and self.approximation_type not in ["spatial", "reservoir"]:
# To obtain the spatially backpropagated errors sent to hidden_states
# NOTE: only works if pert_hidden_states is equal to 0
hidden_states += self.pert_hidden_states.value
output = self.to_output(inputs, hidden_states)
# Compute and update traces if needed (i.e. if we are in online training mode)
if self.online and self.approximation_type not in ["spatial", "reservoir"]:
Bu_elements = jax.vmap(lambda u: self.get_B() @ u)(inputs)
# Update traces for B, lambda and gamma
if self.approximation_type in ["1truncated"]:
self.traces_lambda.value = hidden_states[:-1]
self.traces_gamma.value = Bu_elements
elif self.approximation_type in ["full", "full_rec", "full_rec_simpleB", "snap1"]:
Lambda_elements = jnp.repeat(
self.get_diag_lambda()[None, ...], inputs.shape[0], axis=0
)
# Update for trace lambda
_, self.traces_lambda.value = jax.lax.associative_scan(
binary_operator_diag,
(Lambda_elements[:-1], hidden_states[:-1]),
)
# Update for trace gamma
Bu_elements_gamma = Bu_elements
_, self.traces_gamma.value = jax.lax.associative_scan(
binary_operator_diag, (Lambda_elements, Bu_elements_gamma)
)
# Update trace for B
if self.approximation_type in ["full", "snap1"]:
full_Lambda_elements = jnp.repeat(
jnp.expand_dims(self.get_diag_lambda(), axis=-1)[None, ...],
inputs.shape[0],
axis=0,
) # same as left multiplying by diag(lambda), but same shape as B (to allow for
# element-wise multiplication in the associative scan)
gammau_elements = jax.vmap(lambda u: jnp.outer(self.get_diag_gamma(), u))(
inputs
).astype(jnp.complex64)
_, self.traces_B.value = jax.lax.associative_scan(
binary_operator_diag,
(full_Lambda_elements, gammau_elements + 0j),
)
elif self.approximation_type in ["full_rec_simpleB"]:
self.traces_B.value = jax.vmap(lambda u: jnp.outer(self.get_diag_gamma(), u))(
inputs
).astype(jnp.complex64)
return output
def update_gradients(self, grad):
"""
Eventually combine traces and perturbations to compute the (online) gradient.
"""
if self.training_mode in ["bptt", "online_spatial", "online_reservoir"]:
raise ValueError("Upgrade gradient should not be called for this training mode")
# We need to change the gradients for lambda, gamma and B
# The others are automatically computed with spatial backpropagation
# NOTE: self.pert_hidden_states contains dL/dhidden_states
# Grads for lambda
delta_lambda = jnp.sum(self.pert_hidden_states.value[1:] * self.traces_lambda.value, axis=0)
_, dl = jax.vjp(
lambda nu, theta: self.get_diag_lambda(nu=nu, theta=theta),
self.nu,
self.theta,
)
grad_nu, grad_theta = dl(delta_lambda)
grad["nu"] = grad_nu
grad["theta"] = grad_theta
# Grads for gamma if needed
if self.gamma_norm:
delta_gamma = jnp.sum(
(self.pert_hidden_states.value * self.traces_gamma.value).real, axis=0
)
# as dgamma/dgamma_log = exp(gamma_log) = gamma
grad["gamma_log"] = delta_gamma * self.get_diag_gamma()
# Grads for B
if self.approximation_type in ["snap1", "full", "full_rec_simpleB"]:
grad_B = jnp.sum(
jax.vmap(lambda dx, trace: dx.reshape(-1, 1) * trace)(
self.pert_hidden_states.value, self.traces_B.value
),
axis=0,
)
grad["B_re"] = grad_B.real
grad["B_im"] = -grad_B.imag # Comes from the use of Writtinger derivatives
return grad
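# Note: binary_operator_diag is defined elsewhere in online_lru and is not shown in this excerpt.
# For the diagonal recurrence x_t = lambda * x_{t-1} + Bu_t built in get_hidden_states, the
# standard associative combiner used with jax.lax.associative_scan is plausibly of this form:
import jax
import jax.numpy as jnp

def _binary_operator_diag_sketch(element_i, element_j):
    # Each element (a, b) represents the affine map x -> a * x + b; composing two such maps
    # elementwise gives (a_j * a_i, a_j * b_i + b_j).
    a_i, b_i = element_i
    a_j, b_j = element_j
    return a_j * a_i, a_j * b_i + b_j

# Tiny check against the sequential recurrence x_t = a_t * x_{t-1} + b_t with x_0 = 0:
a = jnp.array([0.5, 0.5, 0.5])
b = jnp.array([1.0, 2.0, 3.0])
_, xs = jax.lax.associative_scan(_binary_operator_diag_sketch, (a, b))
# xs == [1.0, 2.5, 4.25], i.e. x_1 = 1, x_2 = 0.5 * 1 + 2, x_3 = 0.5 * 2.5 + 3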
# Path: online_lru/rec.py
class RNN(nn.Module):
"""
RNN layer that updates internal elegibility traces to allow online
learning.
"""
d_hidden: int # hidden state dimension
d_model: int # input and output dimensions
seq_length: int # time sequence length
activation: str = "linear" # activation function
training_mode: str = "bptt" # which learning algorithm that will be used
scaling_hidden: float = 1.0 # additional scaling for the A matrix in the RNN
def setup(self):
# Check that desired approximation is handled
assert self.training_mode in [
"bptt",
"online_spatial",
"online_1truncated",
"online_snap1",
]
self.online = "online" in self.training_mode # whether we compute the gradient online
if self.online:
self.approximation_type = self.training_mode[7:]
else:
self.approximation_type = "bptt"
# Truncated normal to match haiku's initialization
self.A = self.param(
"A",
partial(truncated_normal_matrix_init, normalization=jnp.sqrt(self.d_hidden)),
(self.d_hidden, self.d_hidden),
)
self.B = self.param(
"B",
partial(matrix_init, normalization=jnp.sqrt(self.d_model)),
(self.d_hidden, self.d_model),
)
self.C = self.param(
"C",
partial(matrix_init, normalization=jnp.sqrt(self.d_hidden)),
(self.d_model, self.d_hidden),
)
self.D = self.param("D", matrix_init, (self.d_model,))
if self.activation == "linear":
self.act_fun = lambda x: x
elif self.activation == "tanh":
self.act_fun = jax.nn.tanh
elif self.activation == "relu":
self.act_fun = jax.nn.relu
else:
raise ValueError("Activation function not supported")
# Internal variables of the model needed for updating the gradient
if self.approximation_type in ["snap1"]:
self.traces_A = self.variable(
"traces", "A", jnp.zeros, (self.seq_length, self.d_hidden, self.d_hidden)
)
self.traces_B = self.variable(
"traces", "B", jnp.zeros, (self.seq_length, self.d_hidden, self.d_model)
)
self.pert_hidden_states = self.variable(
"perturbations",
"hidden_states",
partial(jnp.zeros, dtype=jnp.float32),
(self.seq_length, self.d_hidden),
)
def get_hidden_states(self, inputs):
"""
Compute the hidden states corresponding to inputs.
Return:
hidden_states array[T, N]
"""
def _step(state, Bu):
if self.training_mode in ["bptt"]:
new_state = self.A @ self.act_fun(state) + Bu
elif self.approximation_type in ["1truncated"]:
new_state = self.A @ jax.lax.stop_gradient(self.act_fun(state)) + Bu
else:
new_state = jax.lax.stop_gradient(self.A @ self.act_fun(state)) + Bu
return new_state, new_state
Bu_elements = jax.vmap(lambda u: self.B @ u)(inputs)
_, hidden_states = jax.lax.scan(_step, jnp.zeros(self.d_hidden), Bu_elements)
return hidden_states
def to_output(self, inputs, hidden_states):
"""
Compute output given inputs and hidden states.
Args:
inputs array[T, H].
hidden_states array[T, N].
"""
return jax.vmap(lambda x, u: self.C @ x + self.D * u)(hidden_states, inputs)
def __call__(self, inputs):
"""
Forward pass. If in training mode, additionally computes the
eligibility traces that will be needed to compute the gradient estimate
in backward.
"""
# Compute hidden states and output
hidden_states = self.get_hidden_states(inputs)
if self.approximation_type in ["snap1"]:
# To obtain the spatially backpropagated errors sent to hidden_states
hidden_states += self.pert_hidden_states.value
output = self.to_output(inputs, hidden_states)
# Update traces
if self.approximation_type in ["snap1"]:
# Repeat diagonal of A T times
diags_A = jnp.repeat(jnp.diagonal(self.A)[None, ...], inputs.shape[0], axis=0)
# Add the rho'(x) to it
der_act_fun = jax.grad(self.act_fun)
rho_primes = jax.vmap(lambda x: jax.vmap(der_act_fun)(x))(hidden_states)
A_rho_prime_elements_N = jax.vmap(lambda x: jnp.outer(x, jnp.ones((self.d_hidden,))))(
diags_A * rho_primes
)
A_rho_prime_elements_H = jax.vmap(lambda x: jnp.outer(x, jnp.ones((self.d_model,))))(
diags_A * rho_primes
)
# Compute the trace of A
# with tA_{t+1} = (diag A * rho'(x_t)) 1^T * tA_t + 1 rho(x_t)^T
rho_x_elements = jax.vmap(lambda x: jnp.outer(jnp.ones((self.d_hidden,)), x))(
self.act_fun(hidden_states)
)
_, self.traces_A.value = jax.lax.associative_scan(
partial(binary_operator_diag),
(A_rho_prime_elements_N, rho_x_elements),
)
# Compute the trace of B
            # with tB_{t+1} = (diag A * rho'(x_t)) 1^T * tB_t + 1 u_t^T
u_elements = jax.vmap(lambda u: jnp.outer(jnp.ones((self.d_hidden,)), u))(inputs)
_, self.traces_B.value = jax.lax.associative_scan(
partial(binary_operator_diag), (A_rho_prime_elements_H, u_elements)
)
return output
def update_gradients(self, grad):
"""
Eventually combine traces and perturbations to compute the (online) gradient.
"""
if self.approximation_type not in ["snap1"]:
return grad
# We need to change the gradients for A, and B
grad["A"] = jnp.sum(
jax.vmap(lambda dx, trace: dx.reshape(-1, 1) * trace)(
self.pert_hidden_states.value[1:], self.traces_A.value[:-1]
),
axis=0,
)
grad["B"] = jnp.sum(
jax.vmap(lambda dx, trace: dx.reshape(-1, 1) * trace)(
self.pert_hidden_states.value[1:], self.traces_B.value[:-1]
),
axis=0,
)
return grad
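# --- Editorial sketch (not part of the original file) ---
# update_gradients above combines the spatially backpropagated errors stored in
# pert_hidden_states with the eligibility traces accumulated during the forward
# pass: grad[P] ~= sum_t dx_{t+1} * trace_t for P in {A, B}. A minimal
# standalone version of that combination (array names are illustrative):
def _sketch_trace_gradient(pert_hidden_states, traces):
    import jax
    import jax.numpy as jnp
    # pert_hidden_states: [T, N] errors at the hidden states
    # traces: [T, N, M] eligibility traces for one parameter matrix
    return jnp.sum(
        jax.vmap(lambda dx, trace: dx.reshape(-1, 1) * trace)(
            pert_hidden_states[1:], traces[:-1]
        ),
        axis=0,
    )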
# Path: tests/utils.py
def loss_pred(pred, label, mask=None):
def check_grad_all(grad_1, grad_2, to_check=None, **kwargs):
def compute_grads(model, params_states, inputs, y, mask):
# Path: tests/test_rec.py
import os
import unittest
import jax.numpy as jnp
import jax
import jax.lax
import flax.linen as nn
import flax
from online_lru.rec import LRU, RNN
from tests.utils import base_params, inputs, y, mask, check_grad_all, compute_grads
def test_toy(self):
inputs = jnp.array([[[1.0], [2.0], [3.0]]])
y = jnp.ones([1, 3])
mask = jnp.ones([1, 3])
# custom init of the params and state
params_states = {
"params": {
"B_re": jnp.array([[2.0]]),
"B_im": jnp.array([[2.0]]),
"C_re": jnp.array([[2.0]]),
"C_im": jnp.array([[1.0]]),
"D": jnp.array([0.0]),
"gamma_log": jnp.array([0.0]),
"nu": jnp.log(jnp.log(jnp.array([2.0]))),
"theta": jnp.array([-1e8]),
},
"traces": {
"lambda": jnp.zeros([1, 3, 1], dtype=jnp.complex64),
"gamma": jnp.zeros([1, 3, 1], dtype=jnp.complex64),
"B": jnp.zeros([1, 3, 1, 1], dtype=jnp.complex64),
},
"perturbations": {
"hidden_states": jnp.zeros([1, 3, 1], dtype=jnp.complex64),
},
}
# Corresponding states
# -- x hidden state
# x_0 = 0
# x_1 = 0.5 * 0 + 1 * (2 + 2j) * 1 = 2 + 2j
# x_2 = 0.5 * (2 + 2j) + 1 * (2 + 2j) * 2 = 5 + 5j
# x_3 = 0.5 * (5 + 5j) + 1 * (2 + 2j) * 3 = 8.5 + 8.5j
# -- pred_y prediction
# pred_y_1 = 2
# pred_y_2 = 5
# pred_y_3 = 8.5
# -- delta_y BP error y
# delta_y_1 = 1
# delta_y_2 = 4
# delta_y_3 = 7.5
# -- delta_x BP error x
# delta_x_1 = 1 * (2 + j) = 2 + j
# delta_x_2 = 4 * (2 + j) = 8 + 4j
# delta_x_3 = 7.5 * (2 + j) = 15 + 7.5j
# -- e_lambda
# e_lambda_1 = 0.5 * 0 + 0 = 0
# e_lambda_2 = 0.5 * 0 + 2 + 2j = 2 + 2j
# e_lambda_3 = 0.5 * (2 + 2j) + 5 + 5j = 6 + 6j
# -- dLambda
# dLambda_1 = (2 + j) * 0 = 0
# dLambda_2 = (8 + 4j) * (2 + 2j) = 8 + 24j
# dLambda_3 = (15 + 7.5j) * (6 + 6j) = 45 + 135j
# dLambda = 53 + 159j
# -- e_gamma
# e_gamma_1 = 0.5 * 0 + (2 + 2j) * 1 = 2 + 2j
        # e_gamma_2 = 0.5 * (2 + 2j) + (2 + 2j) * 2 = 5 + 5j
# e_gamma_3 = 0.5 * (5 + 5j) + (2 + 2j) * 3 = 8.5 + 8.5j
# -- d_gamma
# d_gamma_1 = Re[(2 + 2j) * (2 + j)] = 2
# d_gamma_2 = Re[(5 + 5j) * (8 + 4j)] = 20
# d_gamma_3 = Re[(8.5 + 8.5j) * (15 + 7.5j)] = 63.75
# d_gamma = 85.75
# -- e_B
# e_B_1 = 0.5 * 0 + 1 = 1
# e_B_2 = 0.5 * 1 + 2 = 2.5
# e_B_3 = 0.5 * 2.5 + 3 = 4.25
# -- dB
# dB_1 = (2 - j) * 1 = 2 - j
# dB_2 = (8 - 4j) * 2.5 = 20 - 10j
# dB_3 = (15 - 7.5j) * 4.25 = 63.75 - 31.875j
# dB = 85.75 - 42.875j
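        # (Editorial note: one of the steps above, spelled out. For example
        # dLambda_2 = delta_x_2 * e_lambda_2 = (8 + 4j) * (2 + 2j)
        #           = 16 + 16j + 8j + 8j^2 = 8 + 24j,
        # the second term of dLambda = 0 + (8 + 24j) + (45 + 135j) = 53 + 159j.)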
# Compute gradient online
batched_lru = batched_LRU(d_hidden=1, d_model=1, seq_length=3, training_mode="online_full")
        batched_lru.rec_type = "LRU"
grad, online_grad = compute_grads(batched_lru, params_states, inputs, y, mask)
check_grad_all(grad, online_grad, atol=1e-5)
def test_online_full(self):
# Compute gradient online
batched_lru = batched_LRU(**base_params, training_mode="online_full")
batched_lru.rec_type = "LRU"
params_states = batched_lru.init({"params": jax.random.PRNGKey(0)}, inputs)
grad, online_grad = compute_grads(batched_lru, params_states, inputs, y, mask)
# Check that the two match
check_grad_all(grad, online_grad, atol=1e-2)
def test_online_spatial(self):
batched_lru = batched_LRU(**base_params, training_mode="online_spatial")
batched_lru.rec_type = "LRU"
def_params_states = batched_lru.init({"params": jax.random.PRNGKey(0)}, inputs)
# Remove temporal recurrence
params_states = {}
params_states["params"] = flax.core.frozen_dict.unfreeze(def_params_states["params"])
params_states["params"]["nu"] = jnp.ones_like(params_states["params"]["nu"]) * 1e8
params_states["params"] = flax.core.frozen_dict.freeze(params_states["params"])
grad, online_grad = compute_grads(batched_lru, params_states, inputs, y, mask)
# Remove nu and theta from the comparison of the gradient and check that they are 0
assert jnp.allclose(online_grad["nu"], jnp.zeros_like(online_grad["nu"]))
assert jnp.allclose(online_grad["theta"], jnp.zeros_like(online_grad["theta"]))
grad = {k: grad[k] for k in ["B_im", "B_re", "C_im", "C_re", "D", "gamma_log"]}
online_grad = {
k: online_grad[k] for k in ["B_im", "B_re", "C_im", "C_re", "D", "gamma_log"]
}
check_grad_all(online_grad, grad, atol=1e-3)
class TestRNN(unittest.TestCase):
def test_online_snap1(self):
# Compute gradient online
batched_rnn = batched_RNN(**base_params, training_mode="online_snap1")
| batched_rnn.rec_type = "RNN" |
====REPOSITORY====
# Repo Name: zhaohengz/CAILA
# Path: flags.py
DATA_FOLDER = "./all_data"
# Path: data/dataset.py
class ImageLoader:
class CompositionDataset(Dataset):
def __init__(self, root):
def __call__(self, img):
def dataset_transform(phase, norm_family ='clip'):
def filter_data(all_data, pairs_gt, topk = 5):
def __init__(
self,
root,
phase,
dataset=None,
split = 'compositional-split',
norm_family = 'imagenet',
subset = False,
pair_dropout = 0.0,
return_images = False,
train_only = False,
open_world=False
):
def build_data_dict(self, data):
def insert(map, key, value):
def parse_split(self):
def parse_pairs(pair_list):
def get_split_info(self):
def get_dict_data(self, data, attrs, objs, pairs):
def reset_dropout(self):
def sample_negative(self, attr, obj):
def sample_mixup(self, attr, obj):
def sample_affordance(self, attr, obj):
def sample_train_affordance(self, attr, obj, map, target):
def set_p(self, p_mixup, p_shift, p_obj_shift):
def sample_contrastive(self, map, key, num_neg):
def __getitem__(self, index):
def __len__(self):
# Path: models/common.py
class Evaluator:
def __init__(self, dset, model):
self.dset = dset
# Convert text pairs to idx tensors: [('sliced', 'apple'), ('ripe', 'apple'), ...] --> torch.LongTensor([[0,1],[1,1], ...])
pairs = [(dset.attr2idx[attr], dset.obj2idx[obj]) for attr, obj in dset.pairs]
self.train_pairs = [(dset.attr2idx[attr], dset.obj2idx[obj]) for attr, obj in dset.train_pairs]
self.pairs = torch.LongTensor(pairs)
# Mask over pairs that occur in closed world
# Select set based on phase
if dset.phase == 'train':
print('Evaluating with train pairs')
test_pair_set = set(dset.train_pairs)
test_pair_gt = set(dset.train_pairs)
elif dset.phase == 'val':
print('Evaluating with validation pairs')
test_pair_set = set(dset.val_pairs + dset.train_pairs)
test_pair_gt = set(dset.val_pairs)
else:
print('Evaluating with test pairs')
test_pair_set = set(dset.test_pairs + dset.train_pairs)
test_pair_gt = set(dset.test_pairs)
self.test_pair_dict = [(dset.attr2idx[attr], dset.obj2idx[obj]) for attr, obj in test_pair_gt]
self.test_pair_dict = dict.fromkeys(self.test_pair_dict, 0)
# dict values are pair val, score, total
for attr, obj in test_pair_gt:
pair_val = dset.pair2idx[(attr,obj)]
key = (dset.attr2idx[attr], dset.obj2idx[obj])
self.test_pair_dict[key] = [pair_val, 0, 0]
if dset.open_world:
masks = [1 for _ in dset.pairs]
else:
masks = [1 if pair in test_pair_set else 0 for pair in dset.pairs]
self.closed_mask = torch.BoolTensor(masks)
# Mask of seen concepts
seen_pair_set = set(dset.train_pairs)
mask = [1 if pair in seen_pair_set else 0 for pair in dset.pairs]
self.seen_mask = torch.BoolTensor(mask)
# Object specific mask over which pairs occur in the object oracle setting
oracle_obj_mask = []
for _obj in dset.objs:
mask = [1 if _obj == obj else 0 for attr, obj in dset.pairs]
oracle_obj_mask.append(torch.BoolTensor(mask))
self.oracle_obj_mask = torch.stack(oracle_obj_mask, 0)
# Decide if the model under evaluation is a manifold model or not
self.score_model = self.score_manifold_model
    # Generate mask for each setting, mask scores, and get prediction labels
def generate_predictions(self, scores, obj_truth, bias = 0.0, topk = 5): # (Batch, #pairs)
'''
Inputs
scores: Output scores
obj_truth: Ground truth object
Returns
results: dict of results in 3 settings
'''
def get_pred_from_scores(_scores, topk):
'''
Given list of scores, returns top 10 attr and obj predictions
Check later
'''
_, pair_pred = _scores.topk(topk, dim = 1) #sort returns indices of k largest values
pair_pred = pair_pred.contiguous().view(-1)
attr_pred, obj_pred = self.pairs[pair_pred][:, 0].view(-1, topk), \
self.pairs[pair_pred][:, 1].view(-1, topk)
return (attr_pred, obj_pred)
results = {}
orig_scores = scores.clone()
mask = self.seen_mask.repeat(scores.shape[0],1) # Repeat mask along pairs dimension
scores[~mask] += bias # Add bias to test pairs
# Unbiased setting
# Open world setting --no mask, all pairs of the dataset
results.update({'open': get_pred_from_scores(scores, topk)})
results.update({'unbiased_open': get_pred_from_scores(orig_scores, topk)})
# Closed world setting - set the score for all Non test pairs to -1e10,
# this excludes the pairs from set not in evaluation
mask = self.closed_mask.repeat(scores.shape[0], 1)
closed_scores = scores.clone()
closed_scores[~mask] = -1e10
# closed_orig_scores = orig_scores.clone()
# closed_orig_scores[~mask] = -1e10
results.update({'closed': get_pred_from_scores(closed_scores, topk)})
# results.update({'unbiased_closed': get_pred_from_scores(closed_orig_scores, topk)})
# Object_oracle setting - set the score to -1e10 for all pairs where the true object does Not participate, can also use the closed score
# mask = self.oracle_obj_mask[obj_truth]
# oracle_obj_scores = scores.clone()
# oracle_obj_scores[~mask] = -1e10
# oracle_obj_scores_unbiased = orig_scores.clone()
# oracle_obj_scores_unbiased[~mask] = -1e10
# results.update({'object_oracle': get_pred_from_scores(oracle_obj_scores, 1)})
# results.update({'object_oracle_unbiased': get_pred_from_scores(oracle_obj_scores_unbiased, 1)})
results['scores'] = orig_scores
return results
def score_clf_model(self, scores, obj_truth, topk = 5):
'''
Wrapper function to call generate_predictions for CLF models
'''
attr_pred, obj_pred = scores
# Go to CPU
attr_pred, obj_pred, obj_truth = attr_pred.to('cpu'), obj_pred.to('cpu'), obj_truth.to('cpu')
# Gather scores (P(a), P(o)) for all relevant (a,o) pairs
# Multiply P(a) * P(o) to get P(pair)
attr_subset = attr_pred.index_select(1, self.pairs[:,0]) # Return only attributes that are in our pairs
obj_subset = obj_pred.index_select(1, self.pairs[:, 1])
scores = (attr_subset * obj_subset) # (Batch, #pairs)
results = self.generate_predictions(scores, obj_truth)
results['biased_scores'] = scores
return results
def score_manifold_model(self, scores, obj_truth, bias = 0.0, topk = 5):
'''
Wrapper function to call generate_predictions for manifold models
'''
# Go to CPU
# scores = {k: v.to('cpu') for k, v in scores.items()}
obj_truth = obj_truth.to(device)
# Gather scores for all relevant (a,o) pairs
'''
scores = torch.stack(
[scores[(attr,obj)] for attr, obj in self.dset.pairs], 1
) # (Batch, #pairs)
'''
# orig_scores = scores.clone()
results = self.generate_predictions(scores.clone(), obj_truth, bias, topk)
# results['scores'] = orig_scores
return results
def score_fast_model(self, scores, obj_truth, bias = 0.0, topk = 5):
'''
Wrapper function to call generate_predictions for manifold models
'''
results = {}
mask = self.seen_mask.repeat(scores.shape[0],1) # Repeat mask along pairs dimension
scores[~mask] += bias # Add bias to test pairs
mask = self.closed_mask.repeat(scores.shape[0], 1)
closed_scores = scores.clone()
closed_scores[~mask] = -1e10
_, pair_pred = closed_scores.topk(topk, dim = 1) #sort returns indices of k largest values
pair_pred = pair_pred.contiguous().view(-1)
attr_pred, obj_pred = self.pairs[pair_pred][:, 0].view(-1, topk), \
self.pairs[pair_pred][:, 1].view(-1, topk)
results.update({'closed': (attr_pred, obj_pred)})
return results
def evaluate_predictions(self, predictions, attr_truth, obj_truth, pair_truth, allpred, topk = 1):
# Go to CPU
attr_truth, obj_truth, pair_truth = attr_truth.to('cpu'), obj_truth.to('cpu'), pair_truth.to('cpu')
pairs = list(
zip(list(attr_truth.numpy()), list(obj_truth.numpy())))
seen_ind, unseen_ind = [], []
for i in range(len(attr_truth)):
if pairs[i] in self.train_pairs:
seen_ind.append(i)
else:
unseen_ind.append(i)
seen_ind, unseen_ind = torch.LongTensor(seen_ind), torch.LongTensor(unseen_ind)
def _process(_scores):
# Top k pair accuracy
# Attribute, object and pair
attr_match = (attr_truth.unsqueeze(1).repeat(1, topk) == _scores[0][:, :topk])
obj_match = (obj_truth.unsqueeze(1).repeat(1, topk) == _scores[1][:, :topk])
# Match of object pair
match = (attr_match * obj_match).any(1).float()
attr_match = attr_match.any(1).float()
obj_match = obj_match.any(1).float()
# Match of seen and unseen pairs
seen_match = match[seen_ind]
unseen_match = match[unseen_ind]
### Calculating class average accuracy
# local_score_dict = copy.deepcopy(self.test_pair_dict)
# for pair_gt, pair_pred in zip(pairs, match):
# # print(pair_gt)
# local_score_dict[pair_gt][2] += 1.0 #increase counter
# if int(pair_pred) == 1:
# local_score_dict[pair_gt][1] += 1.0
# # Now we have hits and totals for classes in evaluation set
# seen_score, unseen_score = [], []
# for key, (idx, hits, total) in local_score_dict.items():
# score = hits/total
# if bool(self.seen_mask[idx]) == True:
# seen_score.append(score)
# else:
# unseen_score.append(score)
seen_score, unseen_score = torch.ones(512,5), torch.ones(512,5)
return attr_match, obj_match, match, seen_match, unseen_match, \
torch.Tensor(seen_score+unseen_score), torch.Tensor(seen_score), torch.Tensor(unseen_score)
def _add_to_dict(_scores, type_name, stats):
base = ['_attr_match', '_obj_match', '_match', '_seen_match', '_unseen_match', '_ca', '_seen_ca', '_unseen_ca']
for val, name in zip(_scores, base):
stats[type_name + name] = val
        ##################### Match in places where correct object
# obj_oracle_match = (attr_truth == predictions['object_oracle'][0][:, 0]).float() #object is already conditioned
# obj_oracle_match_unbiased = (attr_truth == predictions['object_oracle_unbiased'][0][:, 0]).float()
# stats = dict(obj_oracle_match = obj_oracle_match, obj_oracle_match_unbiased = obj_oracle_match_unbiased)
stats = dict()
#################### Closed world
closed_scores = _process(predictions['closed'])
print(closed_scores[1].mean())
# unbiased_closed = _process(predictions['unbiased_closed'])
_add_to_dict(closed_scores, 'closed', stats)
# _add_to_dict(unbiased_closed, 'closed_ub', stats)
#################### Calculating AUC
scores = predictions['scores']
# getting score for each ground truth class
correct_scores = scores[torch.arange(scores.shape[0]), pair_truth][unseen_ind]
# Getting top predicted score for these unseen classes
max_seen_scores = predictions['scores'][unseen_ind][:, self.seen_mask].topk(topk, dim=1)[0][:, topk - 1]
# Getting difference between these scores
unseen_score_diff = max_seen_scores - correct_scores
# Getting matched classes at max bias for diff
unseen_matches = stats['closed_unseen_match'].bool()
correct_unseen_score_diff = unseen_score_diff[unseen_matches] - 1e-4
# print(correct_unseen_score_diff)
# sorting these diffs
correct_unseen_score_diff = torch.sort(correct_unseen_score_diff)[0]
magic_binsize = 20
# getting step size for these bias values
# print(correct_unseen_score_diff)
bias_skip = max(len(correct_unseen_score_diff) // magic_binsize, 1)
# Getting list
biaslist = correct_unseen_score_diff[::bias_skip]
seen_match_max = float(stats['closed_seen_match'].mean())
unseen_match_max = float(stats['closed_unseen_match'].mean())
seen_accuracy, unseen_accuracy = [], []
# Go to CPU
# base_scores = {k: v.to('cpu') for k, v in allpred.items()}
obj_truth = obj_truth.to('cpu')
# Gather scores for all relevant (a,o) pairs
# base_scores = torch.stack(
# [allpred[(attr,obj)] for attr, obj in self.dset.pairs], 1
# ) # (Batch, #pairs)
base_scores = allpred
print("Start computing Biases")
for bias in biaslist:
scores = base_scores.clone()
results = self.score_fast_model(scores, obj_truth, bias = bias, topk = topk)
results = results['closed'] # we only need biased
results = _process(results)
seen_match = float(results[3].mean())
unseen_match = float(results[4].mean())
# print(seen_match, unseen_match)
# if seen_match > 0 and unseen_match > 0:
seen_accuracy.append(seen_match)
unseen_accuracy.append(unseen_match)
# print(seen_match_max, unseen_match_max)
# if seen_match_max > 0 and unseen_match_max > 0:
seen_accuracy.append(seen_match_max)
unseen_accuracy.append(unseen_match_max)
seen_accuracy, unseen_accuracy = np.array(seen_accuracy), np.array(unseen_accuracy)
area = np.trapz(seen_accuracy, unseen_accuracy)
for key in stats:
stats[key] = float(stats[key].mean())
harmonic_mean = hmean([seen_accuracy, unseen_accuracy], axis = 0)
max_hm = np.max(harmonic_mean)
idx = np.argmax(harmonic_mean)
if idx == len(biaslist):
bias_term = 1e3
else:
bias_term = biaslist[idx]
stats['biasterm'] = float(bias_term)
stats['best_unseen'] = np.max(unseen_accuracy)
stats['best_seen'] = np.max(seen_accuracy)
stats['AUC'] = area
stats['hm_unseen'] = unseen_accuracy[idx]
stats['hm_seen'] = seen_accuracy[idx]
stats['best_hm'] = max_hm
return stats
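# --- Editorial sketch (not part of the original file) ---
# evaluate_predictions above sweeps a bias added to unseen-pair scores, records
# the seen/unseen accuracy at each bias, and reduces the curve to an AUC and a
# best harmonic mean. The final reduction, stated on its own (names are
# illustrative):
def _sketch_auc_from_bias_sweep(seen_accuracy, unseen_accuracy):
    import numpy as np
    from scipy.stats import hmean
    seen, unseen = np.array(seen_accuracy), np.array(unseen_accuracy)
    area = np.trapz(seen, unseen)                    # area under seen-vs-unseen curve
    best_hm = np.max(hmean([seen, unseen], axis=0))  # best harmonic mean over biases
    return area, best_hm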
# Path: utils/utils.py
def load_args(filename, args):
with open(filename, 'r') as stream:
data_loaded = yaml.safe_load(stream)
for key, group in data_loaded.items():
for key, val in group.items():
setattr(args, key, val)
# Path: utils/config_model.py
def configure_model(args, dataset):
is_open = False
if args.model == 'CAILA':
model = CAILA(dataset, args)
model_params = []
prompt_params = []
trainnable_params = ['norm', 'adapter', 'projection', 'gating_fn', 'logit_scale', 'primitive_fusion']
if args.learnable_prompt:
trainnable_params.append('token_embedding')
for name, param in model.named_parameters():
flag = False
for x in trainnable_params:
if x in name:
param.requires_grad_(True)
model_params.append(param)
flag = True
break
if flag:
pass
elif 'prompt' in name:
param.requires_grad_(True)
prompt_params.append(param)
print("Prompt {}".format(name))
else:
param.requires_grad_(False)
optim_params = [{'params':model_params}, {'params':prompt_params, 'lr': args.lr}]
optimizer = optim.Adam(optim_params, lr=args.lr, weight_decay=args.wd)
model.is_open = is_open
elif args.model == 'plainclip':
model = PlainClip(dataset, args)
model_params = []
prompt_params = []
trainnable_params = ['norm', 'adapter', 'projection', 'gating_fn', 'logit_scale', 'primitive_fusion']
if args.learnable_prompt:
trainnable_params.append('token_embedding')
for name, param in model.named_parameters():
flag = False
for x in trainnable_params:
if x in name:
param.requires_grad_(True)
model_params.append(param)
flag = True
break
if flag:
pass
elif 'prompt' in name:
param.requires_grad_(True)
prompt_params.append(param)
print("Prompt {}".format(name))
else:
param.requires_grad_(False)
optim_params = [{'params':model_params}, {'params':prompt_params, 'lr': args.lr}]
optimizer = optim.Adam(optim_params, lr=args.lr, weight_decay=args.wd)
model.is_open = is_open
optimizer = optim.Adam(optim_params, lr=args.lr, weight_decay=args.wd)
model.is_open = is_open
return model, optimizer
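# --- Editorial sketch (not part of the original file) ---
# configure_model above freezes every parameter except those whose names contain
# one of a few substrings (adapters, projections, gating, ...), and puts prompt
# parameters in their own optimizer group. The generic pattern, reduced to its
# core (substrings and learning rate are illustrative placeholders):
def _sketch_selective_freeze(model, trainable_substrings=("adapter", "projection"), lr=1e-4):
    import torch.optim as optim
    tuned = []
    for name, param in model.named_parameters():
        if any(s in name for s in trainable_substrings):
            param.requires_grad_(True)
            tuned.append(param)
        else:
            param.requires_grad_(False)
    return optim.Adam(tuned, lr=lr)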
# Path: flags.py
DATA_FOLDER = "./all_data"
# Path: test.py
import torch
import torch.backends.cudnn as cudnn
import numpy as np
import tqdm
import os
from torch.utils.tensorboard import SummaryWriter
from flags import DATA_FOLDER
from tqdm import tqdm
from os.path import join as ospj
from data import dataset as dset
from models.common import Evaluator
from utils.utils import load_args
from utils.config_model import configure_model
from flags import parser
# Torch imports
cudnn.benchmark = True
# Python imports
# Local imports
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def main():
# Get arguments and start logging
args = parser.parse_args()
logpath = args.logpath
load_args(args.config, args)
# Get dataset
trainset = dset.CompositionDataset(
root=os.path.join(DATA_FOLDER, args.data_dir),
phase='train',
split=args.splitname,
train_only=args.train_only,
subset=args.subset,
open_world=args.open_world,
dataset=args.dataset
)
testset = dset.CompositionDataset(
root=os.path.join(DATA_FOLDER,args.data_dir),
phase='test',
split=args.splitname,
subset=args.subset,
open_world=args.open_world,
norm_family=args.norm_family,
dataset=args.dataset
)
testloader = torch.utils.data.DataLoader(
testset,
batch_size=args.test_batch_size,
shuffle=False,
num_workers=args.workers)
# Get model and optimizer
model, _ = configure_model(args, trainset)
args.load = ospj(logpath,'ckpt_best_auc.t7')
checkpoint = torch.load(args.load)
model.load_state_dict(checkpoint['net'], strict=True)
model = model.cuda()
model.eval()
evaluator = Evaluator(testset, model)
with torch.no_grad():
test(model, testloader, evaluator, args)
def test(model, testloader, evaluator, args, threshold=None, print_results=True):
model.eval()
accuracies, all_sub_gt, all_attr_gt, all_obj_gt, all_pair_gt, all_pred = [], [], [], [], [], []
for idx, data in tqdm(enumerate(testloader), total=len(testloader), desc='Testing'):
data = [d.to(device) for d in data]
_, predictions = model(data)
| attr_truth, obj_truth, pair_truth = data[1], data[2], data[3] |
====REPOSITORY====
# Repo Name: fortelex/hiveline
# Path: hiveline/jobs/jobs.py
class JobHandler:
def __init__(self, service_name: str, sim_id: str, data_source: JobsDataSource):
self.service_name = service_name
self.sim_id = sim_id
self.data_source = data_source
def create_jobs(self, job_ids: list[str]):
self.data_source.create_jobs(self.sim_id, self.service_name, job_ids)
def reset_jobs(self):
self.data_source.reset_jobs(self.sim_id, self.service_name)
def reset_timed_out_jobs(self):
self.data_source.reset_jobs(self.sim_id, self.service_name, status=[JobStatus.STARTED],
max_started_date=datetime.datetime.now() - datetime.timedelta(minutes=5))
def reset_failed_jobs(self):
self.data_source.reset_jobs(self.sim_id, self.service_name, status=[JobStatus.FAILED])
def iterate_jobs(self, handler: Callable[[str], None], threads=4, debug_progress=True, max_consecutive_errors=5):
if threads > 1:
self._spawn_threads(handler, threads, debug_progress, max_consecutive_errors)
return
self._iterate_jobs(handler, debug_progress, max_consecutive_errors)
def _spawn_threads(self, handler: Callable[[str], None], num_threads=4, debug_progress=True,
max_consecutive_errors=5):
threads = []
for i in range(num_threads):
t = threading.Thread(target=self._iterate_jobs,
args=(handler, debug_progress and i == 0, max_consecutive_errors))
t.start()
threads.append(t)
for t in threads:
t.join()
def _iterate_jobs(self, handler: Callable[[str], None], debug_progress=True, max_consecutive_errors=5):
# get the total number of jobs
total_jobs = 0
if debug_progress:
total_jobs = self.data_source.count_jobs(self.sim_id, self.service_name, status=JobStatus.PENDING)
# by default, we will not stop the process if there is one error, but if there are multiple consecutive errors,
# we will stop the process
consecutive_error_number = 0
last_print = 0
while True:
job_id = self.data_source.pop_job(self.sim_id, self.service_name)
if job_id is None:
break
current_time = time.time()
if debug_progress and current_time - last_print > 1:
last_print = current_time
pending_jobs = self.data_source.count_jobs(self.sim_id, self.service_name, status=JobStatus.PENDING)
print("Progress: ~{:.2f}% {:}".format(100 * (1 - pending_jobs / total_jobs), job_id))
try:
handler(job_id)
consecutive_error_number = 0
self.data_source.update_job(self.sim_id, self.service_name, job_id, JobStatus.FINISHED)
except Exception as e:
consecutive_error_number += 1
print(f"Error processing job {job_id}: {e}")
# set status to failed
self.data_source.update_job(self.sim_id, self.service_name, job_id, JobStatus.FAILED, str(e))
if consecutive_error_number > max_consecutive_errors:
raise e
def count_jobs(self, status):
return self.data_source.count_jobs(self.sim_id, self.service_name, status=status)
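# --- Editorial sketch (not part of the original file) ---
# Typical use of the JobHandler defined above: register job ids once, reclaim
# jobs whose workers timed out, then let several threads pop and process them.
# Service name, sim id and the handler body are illustrative placeholders:
def _sketch_job_handler_usage(data_source):
    handler = JobHandler("routing", "sim-0001", data_source)
    handler.create_jobs([f"vc-{i}" for i in range(100)])
    handler.reset_timed_out_jobs()

    def process(job_id: str) -> None:
        pass  # do the actual per-job work here

    handler.iterate_jobs(process, threads=4)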
# Path: hiveline/jobs/jobs.py
class JobStatus(Enum):
PENDING = "pending"
STARTED = "started"
FINISHED = "finished"
FAILED = "failed"
def __str__(self):
return self.value
def __repr__(self):
return self.value
def to_str(self):
return self.value
@staticmethod
def from_str(s: str):
if s == "pending":
return JobStatus.PENDING
elif s == "started":
return JobStatus.STARTED
elif s == "finished":
return JobStatus.FINISHED
elif s == "failed":
return JobStatus.FAILED
else:
raise ValueError(f"Invalid job status: {s}")
# Path: hiveline/jobs/mongo.py
class MongoJobsDataSource(JobsDataSource):
def __init__(self, db=None):
self.db = db
if self.db is None:
self.db = get_database()
self.coll = self.db["jobs"]
def create_jobs(self, sim_id: str, service_name: str, job_ids: list[str]):
for job_id in job_ids:
try:
self.coll.insert_one(MongoJob(
service_name=service_name,
sim_id=sim_id,
job_id=job_id,
status="pending",
created=datetime.datetime.now()
).to_dict())
except pymongo.errors.DuplicateKeyError:
pass
def reset_jobs(self, sim_id: str, service_name: str, status: list[JobStatus] = None, max_started_date=None):
jobs_filter = {
"service-name": service_name,
"sim-id": sim_id
}
if status is not None:
jobs_filter["status"] = {
"$in": [str(s) for s in status]
}
if max_started_date is not None:
jobs_filter["started"] = {
"$lte": max_started_date
}
self.coll.update_many(jobs_filter, {
"$set": {
"status": "pending"
},
"$unset": {
"error": "",
"started": "",
"finished": ""
}
})
def pop_job(self, sim_id: str, service_name: str) -> str | None:
job = self.coll.find_one_and_update({
"service-name": service_name,
"sim-id": sim_id,
"status": "pending"
}, {
"$set": {
"status": "started",
"started": datetime.datetime.now()
}
})
return job["job-id"] if job is not None else None
def update_job(self, sim_id: str, service_name: str, job_id: str, status: JobStatus, error: str | None = None):
update = {
"$set": {
"status": str(status),
"finished": datetime.datetime.now()
}
}
if error is not None:
update["$set"]["error"] = error
if status == JobStatus.STARTED:
update["$set"]["started"] = datetime.datetime.now()
if status == JobStatus.FINISHED or status == JobStatus.FAILED:
update["$set"]["finished"] = datetime.datetime.now()
self.coll.update_one({
"service-name": service_name,
"sim-id": sim_id,
"job-id": job_id
}, update)
def count_jobs(self, sim_id: str, service_name: str, status: JobStatus = None) -> int:
jobs_filter = {
"service-name": service_name,
"sim-id": sim_id
}
if status is not None:
jobs_filter["status"] = str(status)
return self.coll.count_documents(jobs_filter)
def delete_jobs(self, sim_id: str, service_name: str):
self.coll.delete_many({
"service-name": service_name,
"sim-id": sim_id
})
# Path: hiveline/models/fptf.py
def _remove_empty_keys(d):
def read_datetime(time_str):
def format_datetime(dt):
def __init__(self, name=None, address=None, longitude=None, latitude=None, altitude=None):
def to_dict(self):
def to_json(self):
def from_dict(json_str):
def location_from_json(data: dict | str | None):
def __init__(self, id: str, name: str, location: Location = None, regions: list = None):
def to_dict(self):
def to_json(self):
def from_json(json_str):
def station_from_json(data: dict | str | None):
def __init__(self, id: str, station: Station, name: str, location: Location = None):
def to_dict(self):
def to_json(self):
def from_json(json_str):
def stop_from_json(data: dict | str | None):
def place_from_json(data: dict | str | None):
def __init__(self, id: str, name: str, stations: list[Station] = None):
def to_dict(self):
def to_json(self):
def from_json(json_str):
def region_from_json(data: dict | str | None):
def __init__(self, mode: str):
def __str__(self):
def __repr__(self):
def to_string(self):
def to_json(self):
def from_string(mode):
def __init__(self, id: str, name: str):
def to_dict(self):
def to_json(self):
def from_json(json_str):
def operator_from_json(data: dict | str | None):
def __init__(self, id: str, name: str, mode: Mode, routes: list, operator: Operator = None, sub_mode: str = None):
def to_dict(self):
def to_json(self):
def from_json(json_str):
def line_from_json(data: dict | str | None):
def __init__(self, id: str, line: Line, mode: Mode, stops: list[Station | Stop | Location], sub_mode: str = None):
def to_dict(self):
def to_json(self):
def route_from_json(data: dict | str | None):
def __init__(self, arrival: int = None, departure: int = None):
def to_dict(self):
def to_json(self):
def from_json(json_str):
def __init__(self, id: str, route: Route, mode: Mode, sequence: list[ScheduleSequenceElement], starts,
sub_mode=None):
def to_dict(self):
def to_json(self):
def from_json(json_str):
def schedule_from_json(data: dict | str | None):
def __init__(self, stop: Stop | Station | Location, arrival: datetime.datetime = None, arrival_delay: int = None,
arrival_platform: str = None,
departure: datetime.datetime = None, departure_delay: int = None, departure_platform: str = None):
def to_dict(self):
def to_json(self):
def from_json(json_str):
def stopover_from_json(data: dict | str | None):
def get_location(place: Location | Station | Stop | Stopover) -> Location | None:
def __init__(self, amount: float, currency: str):
def to_dict(self):
def to_json(self):
def from_json(json_str):
def price_from_json(data: dict | str | None):
def __init__(self, origin: Stop | Station | Location, destination: Stop | Station | Location,
departure: datetime.datetime, arrival: datetime.datetime, mode: Mode, sub_mode: str = None,
departure_delay: int = None,
departure_platform: str = None,
arrival_delay: int = None, arrival_platform: str = None, line: Line = None, direction: str = None,
stopovers: list[Stopover] = None, schedule: Schedule = None, public: bool = True,
operator: Operator = None,
price: Price = None, polyline: str = None):
def to_dict(self):
def to_json(self):
def from_json(json_str):
def get_departure(self, realtime=True):
def get_arrival(self, realtime=True):
def duration(self, realtime=True):
def leg_from_json(data: dict | str | None):
def __init__(self, id: str, legs: list[Leg], price: Price = None):
def to_dict(self):
def to_json(self):
def from_json(json_str):
def get_departure(self, realtime=True):
def get_arrival(self, realtime=True):
def duration(self, realtime=True):
def get_trace(self) -> list[tuple[tuple[float, float], datetime.datetime, Mode, bool]]:
def journey_from_json(data: dict | str | None):
def from_json(data: dict | str | None):
class Location:
class Station:
class Stop:
class Region:
class Mode(Enum):
class Operator:
class Line:
class Route:
class ScheduleSequenceElement:
class Schedule:
class Stopover:
class Price:
class Leg:
class Journey:
TRAIN = 'train'
BUS = 'bus'
WATERCRAFT = 'watercraft'
TAXI = 'taxi'
GONDOLA = 'gondola'
AIRCRAFT = 'aircraft'
CAR = 'car'
BICYCLE = 'bicycle'
WALKING = 'walking'
UNKNOWN = ''
# Path: hiveline/models/options.py
class Option:
def __init__(self, id: str, origin: fptf.Location, destination: fptf.Location, departure: datetime.datetime,
modes: list[fptf.Mode], journey: fptf.Journey,
trace: list[tuple[tuple[float, float], datetime.datetime, fptf.Mode, bool]] | None = None):
self.id = id
self.origin = origin
self.destination = destination
self.departure = departure
self.modes = modes
self.journey = journey
self.trace = trace
def to_dict(self):
return {
"route-option-id": self.id,
"origin": [self.origin.longitude, self.origin.latitude],
"destination": [self.destination.longitude, self.destination.latitude],
"departure": fptf.format_datetime(self.departure),
"modes": [m.to_string() for m in self.modes],
"journey": self.journey.to_dict()
}
@staticmethod
def from_dict(result):
id = result["route-option-id"]
origin = fptf.Location(longitude=result["origin"][0], latitude=result["origin"][1])
destination = fptf.Location(longitude=result["destination"][0], latitude=result["destination"][1])
departure = fptf.read_datetime(result["departure"])
modes = [fptf.Mode.from_string(m) for m in result["modes"]]
journey = fptf.journey_from_json(result["journey"])
trace = None
return Option(id, origin, destination, departure, modes, journey, trace)
def has_car(self):
"""
Check if a route option has a car leg
:return: True if the route option has a car leg, False otherwise
"""
for leg in self.journey.legs:
mode = leg.mode
if mode == fptf.Mode.CAR:
return True
return False
def get_trace(self) -> list[tuple[tuple[float, float], datetime.datetime, fptf.Mode, bool]]:
if self.trace is None:
self.trace = self.journey.get_trace()
return self.trace
# Path: hiveline/mongo/db.py
def get_database():
dotenv.load_dotenv()
user = os.getenv("UP_MONGO_USER")
password = os.getenv("UP_MONGO_PASSWORD")
domain = os.getenv("UP_MONGO_DOMAIN")
database = os.getenv("UP_MONGO_DATABASE")
connection_string = "mongodb://%s:%s@%s/%s?authSource=admin" % (user, password, domain, database)
client = MongoClient(connection_string)
return client[database]
# Path: hiveline/routing/resource_builder.py
def build_resources(data_dir: str, place, sim_date: datetime.date) -> RoutingServerConfig:
def __get_closest_link(link_list, target_date: datetime.date, ignore_future: bool = False):
def __ensure_data_downloaded(data_dir: str, link_object, file_name_extension: str):
def __ensure_closest_pbf_downloaded(data_dir, place, sim_date):
def __ensure_closest_gtfs_downloaded(data_dir, place, sim_date):
# Path: hiveline/routing/clients/delayed.py
class DelayedRoutingClient(RoutingClient):
def __init__(self, base: RoutingClient):
# This dictionary stores the delay data for each operator
self.delay_data = _read_delay_statistics()
self.base = base
def __get_random_delay(self, operator_name):
"""
        This function returns a random delay for the specified operator. The delay is either a cancellation or a random
        value within the specified interval.
:param operator_name: the name of the operator
:return: a dictionary with the keys "cancelled" and "delay"
"""
operator_name = operator_name.lower()
if operator_name not in self.delay_data:
operator_name = "average"
cancelled_percent = self.delay_data[operator_name]["cancelled_percent"]
if np.random.random() * 100 < cancelled_percent:
return {
"cancelled": True
}
starts = self.delay_data[operator_name]["starts"]
identity = self.delay_data[operator_name]["identity"]
weights = self.delay_data[operator_name]["weights"]
key = np.random.choice(identity, p=weights)
interval_start = starts[key]
interval_end = interval_start + 5
if key < len(starts) - 1:
interval_end = starts[key + 1]
val = np.random.randint(interval_start, interval_end)
return {
"cancelled": False,
"delay": val
}
time_dependent_modes = [fptf.Mode.TRAIN, fptf.Mode.BUS, fptf.Mode.WATERCRAFT, fptf.Mode.AIRCRAFT, fptf.Mode.GONDOLA]
def get_journeys(self, from_lat, from_lon, to_lat, to_lon, departure, modes):
"""
This function returns a delayed itinerary for the specified parameters. It uses the fastest itinerary from OTP
and adds a random delay to each leg of the itinerary. If a leg is cancelled or the traveller cannot catch the
next connection, OTP may be queried multiple times.
:param from_lat: latitude of the start location
:param from_lon: longitude of the start location
:param to_lat: latitude of the destination
:param to_lon: longitude of the destination
:param departure: departure time as datetime object
:param modes: list of modes to use for the trip (e.g. ["WALK", "TRANSIT"])
:return: a delayed journey
"""
raise Exception("delays not currently supported. please use the --no-delays flag or set use_delays=False")
journey = _get_fastest_journey(self.base.get_journeys(from_lat, from_lon, to_lat, to_lon, departure, modes))
if journey is None:
return None
result_legs = []
max_calls = 20
re_calc_count = 0
for call in range(max_calls):
steps = 0
current_delay = 0 # in minutes
# iterate legs
leg_count = len(journey.legs)
while steps < leg_count:
time_independent_start = steps
while steps < leg_count:
leg = journey.legs[steps]
leg.departure_delay = current_delay * 60
leg.arrival_delay = current_delay * 60
if leg.mode in self.time_dependent_modes:
break
steps += 1
if steps >= leg_count:
# we can catch the last connection
break
# point in time when the traveller arrives at the station
real_min_departure = journey.legs[0].departure
if steps > 0:
real_min_departure = journey.legs[steps - 1].arrival
# legs[steps] is a time dependent leg
leg = journey.legs[steps]
# get the operator name
operator_name = leg.operator.name
# get the delay
delay = self.__get_random_delay(operator_name)
# check if the connection is cancelled
if delay["cancelled"]:
# trip is cancelled, reset the steps to the start of the time independent legs
steps = time_independent_start
break
delay_seconds = int(delay["delay"]) * 60
real_departure = leg.departure + datetime.timedelta(seconds=delay_seconds)
if real_departure < real_min_departure:
# we cannot catch the connection, reset the steps to the start of the time independent legs
steps = time_independent_start
break
current_delay = delay["delay"]
leg.departure_delay = delay_seconds
leg.arrival_delay = delay_seconds
steps += 1
if steps >= leg_count:
# we can catch the last connection
result_legs += journey.legs
break
# we cannot catch the last connection
result_legs += journey.legs[:steps]
# route from the last station to the destination
last_leg = journey.legs[0]
position = last_leg.origin
new_dep = last_leg.departure
if steps > 0:
last_leg = journey.legs[steps - 1]
position = last_leg.destination
new_dep = last_leg.arrival + datetime.timedelta(seconds=last_leg.arrival_delay)
pos_lon = position.longitude
pos_lat = position.latitude
journey = _get_fastest_journey(self.base.get_journeys(pos_lat, pos_lon, to_lat, to_lon, new_dep, modes))
re_calc_count += 1
if journey is None:
return None
return fptf.Journey(
id=None,
legs=result_legs
)
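# --- Editorial sketch (not part of the original file) ---
# __get_random_delay above samples from an empirical delay histogram: a Bernoulli
# draw for cancellation, a weighted choice of a bucket, then a uniform draw inside
# that bucket. The same sampling scheme, standalone (inputs are placeholders and
# `weights` is assumed to sum to 1):
def _sketch_sample_delay(starts, weights, cancelled_percent):
    import numpy as np
    if np.random.random() * 100 < cancelled_percent:
        return {"cancelled": True}
    key = np.random.choice(len(starts), p=weights)
    lo = starts[key]
    hi = starts[key + 1] if key < len(starts) - 1 else lo + 5
    return {"cancelled": False, "delay": int(np.random.randint(lo, hi))}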
# Path: hiveline/routing/clients/routing_client.py
class RoutingClient(ABC):
@abstractmethod
def get_journeys(self, from_lat: float, from_lon: float, to_lat: float, to_lon: float, departure: datetime.datetime,
modes: list[fptf.Mode]) -> list[fptf.Journey] | None:
"""
Get routes from the router
:param from_lat: the latitude of the starting point
:param from_lon: the longitude of the starting point
:param to_lat: the latitude of the destination
:param to_lon: the longitude of the destination
:param departure: the departure time as datetime object
:param modes: the fptf modes to use for routing
:return: a list of fptf journey
"""
pass
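# --- Editorial sketch (not part of the original file) ---
# Concrete clients only have to provide get_journeys with the signature above.
# A do-nothing stub showing the expected shape (class name is illustrative):
class _SketchNullRoutingClient(RoutingClient):
    def get_journeys(self, from_lat, from_lon, to_lat, to_lon, departure, modes):
        # a real client would query a routing engine here; this stub finds nothing
        return None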
# Path: hiveline/routing/servers/routing_server.py
class RoutingServer(ABC):
@abstractmethod
def build(self, config: RoutingServerConfig, force_rebuild=False) -> list[str]:
"""
Build the graph for the routing server. This function returns a list of files that are required for routing.
:param config: the configuration for the routing server
:param force_rebuild: if True, the graph will be rebuilt even if it already exists in the cache
:return: a list of files that are required for routing
"""
pass
@abstractmethod
def start(self, config: RoutingServerConfig, built_files: list[str]):
"""
Start the routing server. It should return when the server is ready to accept requests.
:param built_files: the files that were built for the routing server
:param config: the configuration for the routing server
"""
pass
@abstractmethod
def stop(self):
"""
Stop the routing server.
"""
pass
@abstractmethod
def get_meta(self):
"""
Get the metadata of the routing server. Includes the version, name, etc.
"""
pass
# Path: hiveline/vc/vc_extract.py
def extract_origin_loc(vc):
def extract_destination_loc(vc):
def extract_departure(vc, sim) -> datetime:
def has_motor_vehicle(vc):
def has_motorcycle(vc):
def extract_traveller(vc):
def would_use_motorized_vehicle(vc):
def __validate_location(loc):
def should_route(vc):
# Path: hiveline/routing/vc_router.py
import sys
import os
import argparse
import datetime
import time
import uuid
import pymongo.errors
from dotenv import load_dotenv
from hiveline.jobs.jobs import JobHandler, JobStatus
from hiveline.jobs.mongo import MongoJobsDataSource
from hiveline.models import fptf
from hiveline.models.options import Option
from hiveline.mongo.db import get_database
from hiveline.routing import resource_builder
from hiveline.routing.clients.delayed import DelayedRoutingClient
from hiveline.routing.clients.routing_client import RoutingClient
from hiveline.routing.servers.routing_server import RoutingServer
from hiveline.vc import vc_extract
from hiveline.routing.servers.otp import OpenTripPlannerRoutingServer
from hiveline.routing.clients.otp import OpenTripPlannerRoutingClient
from hiveline.routing.servers.bifrost import BifrostRoutingServer
from hiveline.routing.clients.bifrost import BifrostRoutingClient
if __name__ == "__main__":
load_dotenv()
sys.path.append(os.getenv("PROJECT_PATH"))
| def __create_route_calculation_jobs(db, sim_id, job_handler): |
====REPOSITORY====
# Repo Name: uhppoted/uhppoted-app-home-assistant
# Path: custom_components/uhppoted/const.py
DOMAIN = 'uhppoted'
# Path: custom_components/uhppoted/const.py
CONF_BIND_ADDR = 'bind_address'
# Path: custom_components/uhppoted/const.py
CONF_BROADCAST_ADDR = 'broadcast_address'
# Path: custom_components/uhppoted/const.py
CONF_LISTEN_ADDR = 'listen_address'
# Path: custom_components/uhppoted/const.py
CONF_DEBUG = 'debug'
# Path: custom_components/uhppoted/const.py
ATTR_ADDRESS = 'address'
# Path: custom_components/uhppoted/const.py
ATTR_NETMASK = 'netmask'
# Path: custom_components/uhppoted/const.py
ATTR_GATEWAY = 'gateway'
# Path: custom_components/uhppoted/const.py
ATTR_FIRMWARE = 'firmware'
# Path: custom_components/uhppoted/config.py
def configure_controllers(options, f):
if CONF_CONTROLLERS in options:
controllers = options[CONF_CONTROLLERS]
for c in controllers:
controller = f'{c[CONF_CONTROLLER_ID]}'.strip()
serial_no = f'{c[CONF_CONTROLLER_SERIAL_NUMBER]}'.strip()
address = f'{c[CONF_CONTROLLER_ADDR]}'.strip()
f(controller, serial_no, address)
# Path: custom_components/uhppoted/config.py
def configure_doors(options, g):
if CONF_CONTROLLERS in options and CONF_DOORS in options:
controllers = options[CONF_CONTROLLERS]
doors = options[CONF_DOORS]
for c in controllers:
controller = f'{c[CONF_CONTROLLER_ID]}'.strip()
serial_no = f'{c[CONF_CONTROLLER_SERIAL_NUMBER]}'.strip()
address = f'{c[CONF_CONTROLLER_ADDR]}'.strip()
for d in doors:
door = f'{d[CONF_DOOR_ID]}'.strip()
door_no = f'{d[CONF_DOOR_NUMBER]}'.strip()
door_controller = f'{d[CONF_DOOR_CONTROLLER]}'.strip()
if door_controller == controller:
g(controller, serial_no, door, door_no)
# Path: custom_components/uhppoted/config.py
def configure_cards(options, f):
if CONF_CARDS in options:
cards = options[CONF_CARDS]
for c in cards:
card = f'{c[CONF_CARD_NUMBER]}'.strip()
name = f'{c[CONF_CARD_NAME]}'.strip()
unique_id = f'{c[CONF_CARD_UNIQUE_ID]}'.strip()
f(card, name, unique_id)
# Path: custom_components/uhppoted/config.py
def configure_driver(options):
bind = options[CONF_BIND_ADDR]
broadcast = options[CONF_BROADCAST_ADDR]
listen = options[CONF_LISTEN_ADDR]
debug = options[CONF_DEBUG]
if CONF_CONTROLLERS in options:
controllers = [int(f'{v[CONF_CONTROLLER_SERIAL_NUMBER]}') for v in options[CONF_CONTROLLERS]]
else:
controllers = []
return {
'api': uhppote.Uhppote(bind, broadcast, listen, debug),
'controllers': controllers,
}
# Path: custom_components/uhppoted/controller.py
class ControllerInfo(SensorEntity):
_attr_icon = 'mdi:identifier'
_attr_has_entity_name: True
_attr_translation_key = 'controller_id'
def __init__(self, u, controller, serial_no):
super().__init__()
_LOGGER.debug(f'controller {controller} {serial_no}')
self.uhppote = u
self.controller = controller
self.serial_no = int(f'{serial_no}')
self._name = f'uhppoted.controller.{controller}.info'.lower()
self._state = None
self._attributes: Dict[str, Any] = {
ATTR_ADDRESS: '',
ATTR_NETMASK: '',
ATTR_GATEWAY: '',
ATTR_FIRMWARE: '',
}
self._available = False
@property
def unique_id(self) -> str:
return f'uhppoted.controller.{self.controller}.info'.lower()
@property
def name(self) -> str:
return self._name
@property
def available(self) -> bool:
return self._available
@property
def state(self) -> Optional[str]:
if self._state != None:
return f'{self._state}'
return None
@property
def extra_state_attributes(self) -> Dict[str, Any]:
return self._attributes
async def async_update(self):
_LOGGER.debug(f'controller:{self.controller} update info')
try:
response = self.uhppote.get_controller(self.serial_no)
if response.controller == self.serial_no:
self._state = response.controller
self._available = True
self._attributes[ATTR_ADDRESS] = f'{response.ip_address}'
self._attributes[ATTR_NETMASK] = f'{response.subnet_mask}'
self._attributes[ATTR_GATEWAY] = f'{response.gateway}'
self._attributes[ATTR_FIRMWARE] = f'{response.version} {response.date:%Y-%m-%d}'
except (Exception):
self._available = False
_LOGGER.exception(f'error retrieving controller {self.controller} information')
# Path: custom_components/uhppoted/door.py
class ControllerDoor(SensorEntity):
_attr_icon = 'mdi:door'
_attr_has_entity_name: True
def __init__(self, u, controller, serial_no, door, door_id):
super().__init__()
_LOGGER.debug(f'controller {controller}: door:{door}')
self.uhppote = u
self.controller = controller
self.serial_no = int(f'{serial_no}')
self.door = door
self.door_id = int(f'{door_id}')
self._name = f'uhppoted.door.{door}'.lower()
self._unlocked = None
self._open = None
self._button = None
self._available = False
self._attributes: Dict[str, Any] = {
ATTR_DOOR_CONTROLLER: f'{serial_no}',
ATTR_DOOR_NUMBER: f'{door_id}',
}
@property
def unique_id(self) -> str:
return f'uhppoted.door.{self.door}'.lower()
@property
def name(self) -> str:
return self._name
@property
def available(self) -> bool:
return self._available
@property
def state(self) -> Optional[str]:
if self._available:
s = []
if self._button == True:
s.append('PRESSED')
if self._unlocked == False:
s.append('LOCKED')
elif self._unlocked == True:
s.append('UNLOCKED')
if self._open == False:
s.append('CLOSED')
elif self._open == True:
s.append('OPEN')
return ' '.join(s)
return None
@property
def extra_state_attributes(self) -> Dict[str, Any]:
return self._attributes
async def async_update(self):
_LOGGER.debug(f'controller:{self.controller} update door {self.door} state')
try:
response = self.uhppote.get_status(self.serial_no)
if response.controller == self.serial_no:
if self.door_id == 1:
self._open = response.door_1_open == True
self._button = response.door_1_button == True
self._unlocked = response.relays & 0x01 == 0x01
elif self.door_id == 2:
self._open = response.door_2_open == True
self._button = response.door_2_button == True
self._unlocked = response.relays & 0x02 == 0x02
elif self.door_id == 3:
self._open = response.door_3_open == True
self._button = response.door_3_button == True
self._unlocked = response.relays & 0x04 == 0x04
elif self.door_id == 4:
self._open = response.door_4_open == True
self._button = response.door_4_button == True
self._unlocked = response.relays & 0x08 == 0x08
else:
self._open = None
self._button = None
self._unlocked = None
self._available = True
except (Exception):
self._available = False
_LOGGER.exception(f'error retrieving controller {self.controller} status')
# Path: custom_components/uhppoted/door.py
class ControllerDoorOpen(SensorEntity):
_attr_icon = 'mdi:door'
_attr_has_entity_name: True
def __init__(self, u, controller, serial_no, door, door_id):
super().__init__()
_LOGGER.debug(f'controller {controller}: door:{door} open')
self.uhppote = u
self.controller = controller
self.serial_no = int(f'{serial_no}')
self.door = door
self.door_id = int(f'{door_id}')
self._name = f'uhppoted.door.{door}.open'.lower()
self._open = None
self._available = False
@property
def unique_id(self) -> str:
return f'uhppoted.door.{self.door}.open'.lower()
@property
def name(self) -> str:
return self._name
@property
def available(self) -> bool:
return self._available
@property
def state(self) -> Optional[str]:
if self._available:
if self._open == False:
return 'CLOSED'
elif self._open == True:
return 'OPEN'
return None
async def async_update(self):
_LOGGER.debug(f'controller:{self.controller} update door {self.door}.open state')
try:
response = self.uhppote.get_status(self.serial_no)
if response.controller == self.serial_no:
if self.door_id == 1:
self._open = response.door_1_open == True
elif self.door_id == 2:
self._open = response.door_2_open == True
elif self.door_id == 3:
self._open = response.door_3_open == True
elif self.door_id == 4:
self._open = response.door_4_open == True
else:
self._open = None
self._available = True
except (Exception):
self._available = False
_LOGGER.exception(f'error retrieving controller {self.controller} status')
# Path: custom_components/uhppoted/door.py
class ControllerDoorLock(SensorEntity):
_attr_icon = 'mdi:door'
_attr_has_entity_name: True
def __init__(self, u, controller, serial_no, door, door_id):
super().__init__()
_LOGGER.debug(f'controller {controller}: door:{door} lock')
self.uhppote = u
self.controller = controller
self.serial_no = int(f'{serial_no}')
self.door = door
self.door_id = int(f'{door_id}')
self._name = f'uhppoted.door.{door}.lock'.lower()
self._unlocked = None
self._available = False
@property
def unique_id(self) -> str:
return f'uhppoted.door.{self.door}.lock'.lower()
@property
def name(self) -> str:
return self._name
@property
def available(self) -> bool:
return self._available
@property
def state(self) -> Optional[str]:
if self._available:
if self._unlocked == False:
return 'LOCKED'
elif self._unlocked == True:
return 'UNLOCKED'
return None
async def async_update(self):
_LOGGER.debug(f'controller:{self.controller} update door {self.door}.lock state')
try:
response = self.uhppote.get_status(self.serial_no)
if response.controller == self.serial_no:
if self.door_id == 1:
self._unlocked = response.relays & 0x01 == 0x01
elif self.door_id == 2:
self._unlocked = response.relays & 0x02 == 0x02
elif self.door_id == 3:
self._unlocked = response.relays & 0x04 == 0x04
elif self.door_id == 4:
self._unlocked = response.relays & 0x08 == 0x08
else:
self._unlocked = None
self._available = True
except (Exception):
self._available = False
_LOGGER.exception(f'error retrieving controller {self.controller} status')
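# --- Editorial sketch (not part of the original file) ---
# The lock state above is decoded from the controller's relay bitmask: bit 0 is
# door 1, bit 1 is door 2, and so on. The same decoding written once for any
# 1-based door number:
def _sketch_door_unlocked(relays: int, door_id: int) -> bool:
    mask = 1 << (door_id - 1)
    return (relays & mask) == mask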
# Path: custom_components/uhppoted/door.py
class ControllerDoorButton(SensorEntity):
_attr_icon = 'mdi:door'
_attr_has_entity_name: True
def __init__(self, u, controller, serial_no, door, door_id):
super().__init__()
_LOGGER.debug(f'controller {controller}: door:{door} button')
self.uhppote = u
self.controller = controller
self.serial_no = int(f'{serial_no}')
self.door = door
self.door_id = int(f'{door_id}')
self._name = f'uhppoted.door.{door}.button'.lower()
self._pressed = None
self._available = False
@property
def unique_id(self) -> str:
return f'uhppoted.door.{self.door}.button'.lower()
@property
def name(self) -> str:
return self._name
@property
def available(self) -> bool:
return self._available
@property
def state(self) -> Optional[str]:
if self._available:
if self._pressed == True:
return 'PRESSED'
elif self._pressed == False:
return 'RELEASED'
return None
async def async_update(self):
_LOGGER.debug(f'controller:{self.controller} update door {self.door} button state')
try:
response = self.uhppote.get_status(self.serial_no)
if response.controller == self.serial_no:
if self.door_id == 1:
self._pressed = response.door_1_button == True
elif self.door_id == 2:
self._pressed = response.door_2_button == True
elif self.door_id == 3:
self._pressed = response.door_3_button == True
elif self.door_id == 4:
self._pressed = response.door_4_button == True
else:
self._pressed = None
self._available = True
except (Exception):
self._available = False
_LOGGER.exception(f'error retrieving controller {self.controller} status')
# Path: custom_components/uhppoted/door.py
class ControllerDoorMode(SelectEntity):
_attr_icon = 'mdi:door'
_attr_has_entity_name: True
def __init__(self, u, controller, serial_no, door, door_id):
super().__init__()
_LOGGER.debug(f'controller {controller}: door:{door} mode')
self.uhppote = u
self.controller = controller
self.serial_no = int(f'{serial_no}')
self.door = door
self.door_id = int(f'{door_id}')
self._name = f'uhppoted.door.{door}.mode'.lower()
self._mode = None
self._available = False
@property
def unique_id(self) -> str:
return f'uhppoted.door.{self.door}.mode'.lower()
@property
def name(self) -> str:
return self._name
@property
def available(self) -> bool:
return self._available
@property
def options(self):
return ['CONTROLLED', 'LOCKED', 'UNLOCKED']
@property
def current_option(self) -> Optional[str]:
if self._available:
if self._mode == 1:
return 'UNLOCKED'
elif self._mode == 2:
return 'LOCKED'
elif self._mode == 3:
return 'CONTROLLED'
else:
return 'UNKNOWN'
return None
async def async_select_option(self, option):
if option == 'UNLOCKED':
self._mode = 1
elif option == 'LOCKED':
self._mode = 2
elif option == 'CONTROLLED':
self._mode = 3
try:
response = self.uhppote.get_door_control(self.serial_no, self.door_id)
if response.controller == self.serial_no and response.door == self.door_id:
mode = self._mode
delay = response.delay
response = self.uhppote.set_door_control(self.serial_no, self.door_id, mode, delay)
if response.controller == self.serial_no and response.door == self.door_id:
_LOGGER.info(f'set door {self.door} mode ({option})')
self._mode = response.mode
self._available = True
else:
raise ValueError(f'failed to set controller {self.controller} door {self.door} mode')
except (Exception):
self._available = False
_LOGGER.exception(f'error retrieving controller {self.controller} door {self.door} mode')
async def async_update(self):
_LOGGER.debug(f'controller:{self.controller} update door {self.door} mode')
try:
response = self.uhppote.get_door_control(self.serial_no, self.door_id)
if response.controller == self.serial_no and response.door == self.door_id:
self._mode = response.mode
self._available = True
except (Exception):
self._available = False
_LOGGER.exception(f'error retrieving controller {self.controller} door {self.door} mode')
# Path: custom_components/uhppoted/card.py
class CardInfo(SensorEntity):
_attr_icon = 'mdi:card-account-details'
_attr_has_entity_name: True
def __init__(self, u, card, name, unique_id):
super().__init__()
_LOGGER.debug(f'card {card}')
self.driver = u
self.card = int(f'{card}')
self._unique_id = unique_id
self._name = f'uhppoted.card.{card}.info'.lower()
self._cardholder = name
self._start_date = None
self._end_date = None
self._permissions = None
self._available = False
@property
def unique_id(self) -> str:
return f'uhppoted.card.{self._unique_id}.info'.lower()
@property
def name(self) -> str:
return self._name
@property
def available(self) -> bool:
return self._available
@property
def state(self) -> Optional[str]:
if self._available:
today = date.today()
state = []
if self._cardholder.strip() != '':
state.append(self._cardholder)
if self._start_date and self._start_date <= today and self._end_date and self._end_date >= today:
state.append('VALID')
elif self._start_date and self._start_date > today:
state.append('NOT VALID')
elif self._end_date and self._end_date < today:
state.append('EXPIRED')
if self._permissions and len(self._permissions) < 1:
state.append('NO ACCESS')
return ', '.join(state)
return None
@property
def extra_state_attributes(self) -> Dict[str, Any]:
permissions = f"','.join(self._permissions)" if self._permissions else None
return {
ATTR_CARD_HOLDER: self._cardholder,
ATTR_CARD_STARTDATE: self._start_date,
ATTR_CARD_ENDDATE: self._end_date,
ATTR_CARD_PERMISSIONS: permissions,
}
async def async_update(self):
_LOGGER.debug(f'card:{self.card} state')
try:
start_date = None
end_date = None
for controller in self.driver['controllers']:
response = self.driver['api'].get_card(controller, self.card)
if response.controller == controller and response.card_number == self.card:
if not start_date or response.start_date < start_date:
start_date = response.start_date
if not end_date or response.end_date > end_date:
end_date = response.end_date
self._start_date = start_date
self._end_date = end_date
self._available = True
except (Exception):
self._available = False
_LOGGER.exception(f'error retrieving card {self.card} state')
# Path: custom_components/uhppoted/card.py
class CardHolder(SensorEntity):
_attr_icon = 'mdi:card-account-details'
_attr_has_entity_name = True
def __init__(self, u, card, name, unique_id):
super().__init__()
_LOGGER.debug(f'card {card}')
self.driver = u
self.card = int(f'{card}')
self._unique_id = unique_id
self._name = f'uhppoted.card.{card}.cardholder'.lower()
self._cardholder = name
self._available = True
self._attributes: Dict[str, Any] = {}
@property
def unique_id(self) -> str:
return f'uhppoted.card.{self._unique_id}.cardholder'.lower()
@property
def name(self) -> str:
return self._name
@property
def available(self) -> bool:
return self._available
@property
def state(self) -> Optional[str]:
if self._available:
return self._cardholder
return None
@property
def extra_state_attributes(self) -> Dict[str, Any]:
return self._attributes
async def async_update(self):
_LOGGER.debug(f'card:{self.card} cardholder')
self._available = True
# Path: custom_components/uhppoted/sensor.py
from __future__ import annotations
import datetime
import logging
from homeassistant.core import HomeAssistant
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.typing import DiscoveryInfoType
from homeassistant.components.sensor import SensorEntity
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from uhppoted import uhppote
from .const import DOMAIN
from .const import CONF_BIND_ADDR
from .const import CONF_BROADCAST_ADDR
from .const import CONF_LISTEN_ADDR
from .const import CONF_DEBUG
from .const import ATTR_ADDRESS
from .const import ATTR_NETMASK
from .const import ATTR_GATEWAY
from .const import ATTR_FIRMWARE
from .config import configure_controllers
from .config import configure_doors
from .config import configure_cards
from .config import configure_driver
from .controller import ControllerInfo
from .door import ControllerDoor
from .door import ControllerDoorOpen
from .door import ControllerDoorLock
from .door import ControllerDoorButton
from .door import ControllerDoorMode
from .card import CardInfo
from .card import CardHolder
_LOGGER = logging.getLogger(__name__)
# Configuration constants
# Attribute constants
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback):
config = entry.data
options = entry.options
u = configure_driver(options)
entities = []
def f(controller, serial_no, address):
| entities.extend([ |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: shadowpa0327/FLORA
# Path: my_meter.py
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self):
self._world_size = dist.get_world_size()
self.reset()
def reset(self):
# local
self._val = 0
self._sum = 0
self._count = 0
# global
self._history_avg = 0
self._history_count = 0
self._avg = None
def update(self, val, n=1):
self._val = val
self._sum += val * n
self._count += n
self._avg = None
@property
def val(self):
return self._val
@property
def count(self):
return self._count + self._history_count
@property
def avg(self):
if self._avg is None:
# compute avg
r = self._history_count / max(1, self._history_count + self._count)
_avg = self._sum / max(1, self._count)
self._avg = r * self._history_avg + (1.0 - r) * _avg
return self._avg
def sync(self):
buf = torch.tensor([self._sum, self._count],
dtype=torch.float32).cuda()
buf = reduce_tensor(buf, 1)
_sum, _count = buf.tolist()
_avg = _sum / max(1, _count)
r = self._history_count / max(1, self._history_count + _count)
self._history_avg = r * self._history_avg + (1.0 - r) * _avg
self._history_count += _count
self._sum = 0
self._count = 0
self._avg = None
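# --- usage sketch (editor's addition, not part of the repository) ---
# A minimal illustration of how AverageMeter is typically driven: update() per
# step with the local batch size, then sync() to fold local statistics into the
# cross-rank history. Assumes torch.distributed is already initialized, since the
# constructor queries the world size; the loss values and batch size are placeholders.
meter = AverageMeter()
for batch_loss in (0.9, 0.7, 0.5):
    meter.update(batch_loss, n=32)   # n is the number of samples in the local batch
local_avg = meter.avg                # running average of the local updates
meter.sync()                         # reduce local sum/count across ranks into the history
global_avg = meter.avg               # now reflects statistics gathered from every rank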
# Path: config.py
def get_config(args):
"""Get a yacs CfgNode object with default values."""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern
config = _C.clone()
update_config(config, args)
return config
# Path: models/build.py
def build_model(config):
model_type = config.MODEL.TYPE
if model_type == 'swin':
model = SwinTransformer(
img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.SWIN.PATCH_SIZE,
in_chans=config.MODEL.SWIN.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.SWIN.EMBED_DIM,
depths=config.MODEL.SWIN.DEPTHS,
num_heads=config.MODEL.SWIN.NUM_HEADS,
window_size=config.MODEL.SWIN.WINDOW_SIZE,
mlp_ratio=config.MODEL.SWIN.MLP_RATIO,
qkv_bias=config.MODEL.SWIN.QKV_BIAS,
qk_scale=config.MODEL.SWIN.QK_SCALE,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
ape=config.MODEL.SWIN.APE,
patch_norm=config.MODEL.SWIN.PATCH_NORM,
use_checkpoint=config.TRAIN.USE_CHECKPOINT,
fused_window_process=config.FUSED_WINDOW_PROCESS
)
elif model_type == 'deit':
model = VisionTransformer(
img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.DEIT.PATCH_SIZE,
in_chans=config.MODEL.DEIT.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.DEIT.EMBED_DIM,
depth=config.MODEL.DEIT.DEPTH,
num_heads = config.MODEL.DEIT.NUM_HEADS,
mlp_ratio = config.MODEL.DEIT.MLP_RATIO,
qkv_bias = config.MODEL.DEIT.QKV_BIAS,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
)
elif model_type == 'lr_swin':
model = LRSwinTransformer(
img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.SWIN.PATCH_SIZE,
in_chans=config.MODEL.SWIN.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.SWIN.EMBED_DIM,
depths=config.MODEL.SWIN.DEPTHS,
num_heads=config.MODEL.SWIN.NUM_HEADS,
window_size=config.MODEL.SWIN.WINDOW_SIZE,
mlp_ratio=config.MODEL.SWIN.MLP_RATIO,
qkv_bias=config.MODEL.SWIN.QKV_BIAS,
qk_scale=config.MODEL.SWIN.QK_SCALE,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
ape=config.MODEL.SWIN.APE,
patch_norm=config.MODEL.SWIN.PATCH_NORM,
use_checkpoint=config.TRAIN.USE_CHECKPOINT,
fused_window_process=config.FUSED_WINDOW_PROCESS
)
elif model_type == 'lr_swin_subnet':
model = LRSwinTransformerSubnet(
svd_config=config.MODEL.SWIN.SVD_CONFIG,
img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.SWIN.PATCH_SIZE,
in_chans=config.MODEL.SWIN.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.SWIN.EMBED_DIM,
depths=config.MODEL.SWIN.DEPTHS,
num_heads=config.MODEL.SWIN.NUM_HEADS,
window_size=config.MODEL.SWIN.WINDOW_SIZE,
mlp_ratio=config.MODEL.SWIN.MLP_RATIO,
qkv_bias=config.MODEL.SWIN.QKV_BIAS,
qk_scale=config.MODEL.SWIN.QK_SCALE,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
ape=config.MODEL.SWIN.APE,
patch_norm=config.MODEL.SWIN.PATCH_NORM,
use_checkpoint=config.TRAIN.USE_CHECKPOINT,
fused_window_process=config.FUSED_WINDOW_PROCESS
)
elif model_type == 'lr_deit':
model = LRVisionTransformer(
img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.DEIT.PATCH_SIZE,
in_chans=config.MODEL.DEIT.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.DEIT.EMBED_DIM,
depth=config.MODEL.DEIT.DEPTH,
num_heads = config.MODEL.DEIT.NUM_HEADS,
mlp_ratio = config.MODEL.DEIT.MLP_RATIO,
qkv_bias = config.MODEL.DEIT.QKV_BIAS,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
fused_lr=config.MODEL.DEIT.FUSE_LR,
)
elif model_type == 'lr_deit_subnet':
model = LRVisionTransformerSubnet(
svd_config = config.MODEL.DEIT.SVD_CONFIG,
img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.DEIT.PATCH_SIZE,
in_chans=config.MODEL.DEIT.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.DEIT.EMBED_DIM,
depth=config.MODEL.DEIT.DEPTH,
num_heads = config.MODEL.DEIT.NUM_HEADS,
mlp_ratio = config.MODEL.DEIT.MLP_RATIO,
qkv_bias = config.MODEL.DEIT.QKV_BIAS,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
)
else:
raise NotImplementedError(f"Unknown model: {model_type}")
return model
# Path: data/build.py
def build_loader(config):
config.defrost()
dataset_train, config.MODEL.NUM_CLASSES = build_dataset(
is_train=True, config=config)
config.freeze()
print(
f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build train dataset")
dataset_val, _ = build_dataset(is_train=False, config=config)
print(
f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build val dataset")
mixup_active = config.AUG.MIXUP > 0 or config.AUG.CUTMIX > 0. or config.AUG.CUTMIX_MINMAX is not None
sampler_train = MyDistributedSampler(
dataset_train, shuffle=True,
drop_last=False, padding=True, pair=mixup_active and config.DISTILL.ENABLED,
)
sampler_val = MyDistributedSampler(
dataset_val, shuffle=False,
drop_last=False, padding=False, pair=False,
)
# TinyViT Dataset Wrapper
if config.DISTILL.ENABLED:
dataset_train = DatasetWrapper(dataset_train,
logits_path=config.DISTILL.TEACHER_LOGITS_PATH,
topk=config.DISTILL.LOGITS_TOPK,
write=config.DISTILL.SAVE_TEACHER_LOGITS,
)
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=config.DATA.BATCH_SIZE,
num_workers=config.DATA.NUM_WORKERS,
pin_memory=config.DATA.PIN_MEMORY,
# modified for TinyViT, we save logits of all samples
drop_last=not config.DISTILL.SAVE_TEACHER_LOGITS,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=int(config.DATA.BATCH_SIZE*1.5),
shuffle=False,
num_workers=config.DATA.NUM_WORKERS,
pin_memory=config.DATA.PIN_MEMORY,
drop_last=False
)
# setup mixup / cutmix
mixup_fn = None
if mixup_active:
mixup_t = Mixup if not config.DISTILL.ENABLED else Mixup_record
if config.DISTILL.ENABLED and config.AUG.MIXUP_MODE != "pair2":
# change to pair2 mode for saving logits
config.defrost()
config.AUG.MIXUP_MODE = 'pair2'
config.freeze()
mixup_fn = mixup_t(
mixup_alpha=config.AUG.MIXUP, cutmix_alpha=config.AUG.CUTMIX, cutmix_minmax=config.AUG.CUTMIX_MINMAX,
prob=config.AUG.MIXUP_PROB, switch_prob=config.AUG.MIXUP_SWITCH_PROB, mode=config.AUG.MIXUP_MODE,
label_smoothing=config.MODEL.LABEL_SMOOTHING, num_classes=config.MODEL.NUM_CLASSES)
return dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn
# Path: logger.py
@functools.lru_cache()
def create_logger(output_dir, dist_rank=0, name=''):
# create logger
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.propagate = False
# create formatter
fmt = '[%(asctime)s %(name)s] (%(filename)s %(lineno)d): %(levelname)s %(message)s'
color_fmt = colored('[%(asctime)s %(name)s]', 'green') + \
colored('(%(filename)s %(lineno)d)', 'yellow') + \
': %(levelname)s %(message)s'
# create console handlers for master process
if dist_rank == 0:
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(
logging.Formatter(fmt=color_fmt, datefmt='%Y-%m-%d %H:%M:%S'))
logger.addHandler(console_handler)
# create file handlers
file_handler = logging.FileHandler(os.path.join(
output_dir, f'log_rank{dist_rank}.txt'), mode='a')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(
fmt=fmt, datefmt='%Y-%m-%d %H:%M:%S'))
logger.addHandler(file_handler)
return logger
# Path: utils.py
def load_checkpoint(config, model, optimizer, lr_scheduler, loss_scaler, logger, search_space = None):
logger.info(
f"==============> Resuming form {config.MODEL.RESUME}....................")
if config.MODEL.RESUME.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
config.MODEL.RESUME, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(config.MODEL.RESUME, map_location='cpu')
params = checkpoint['model']
now_model_state = model.state_dict()
mnames = ['head.weight', 'head.bias'] # (cls, 1024), (cls, )
if mnames[-1] in params:
ckpt_head_bias = params[mnames[-1]]
now_model_bias = now_model_state[mnames[-1]]
if ckpt_head_bias.shape != now_model_bias.shape:
num_classes = 1000
if len(ckpt_head_bias) == 21841 and len(now_model_bias) == num_classes:
logger.info("Convert checkpoint from 21841 to 1k")
# convert 22kto1k
fname = './imagenet_1kto22k.txt'
with open(fname) as fin:
mapping = torch.Tensor(
list(map(int, fin.readlines()))).to(torch.long)
for name in mnames:
v = params[name]
shape = list(v.shape)
shape[0] = num_classes
mean_v = v[mapping[mapping != -1]].mean(0, keepdim=True)
v = torch.cat([v, mean_v], 0)
v = v[mapping]
params[name] = v
msg = model.load_state_dict(params, strict=False)
logger.info(msg)
max_accuracy = 0.0
if not config.EVAL_MODE:
if 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint:
if optimizer is not None:
optimizer.load_state_dict(checkpoint['optimizer'])
if lr_scheduler is not None:
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
logger.info(
f"=> loaded successfully '{config.MODEL.RESUME}' (epoch {checkpoint['epoch']})")
if 'max_accuracy' in checkpoint:
max_accuracy = checkpoint['max_accuracy']
if 'search_space' in checkpoint and search_space is not None:
search_space.load_state_dict(checkpoint['search_space'])
logger.info(
f"=> Found existing search space: {search_space})")
logger.info(
f"=> loaded search space successfully")
if 'epoch' in checkpoint:
config.defrost()
config.TRAIN.START_EPOCH = checkpoint['epoch'] + 1
config.freeze()
del checkpoint
torch.cuda.empty_cache()
return max_accuracy
# Path: utils.py
class NativeScalerWithGradNormCount:
state_dict_key = "amp_scaler"
def __init__(self):
self._scaler = torch.cuda.amp.GradScaler()
def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
self._scaler.scale(loss).backward(create_graph=create_graph)
if update_grad:
if clip_grad is not None:
assert parameters is not None
# unscale the gradients of optimizer's assigned params in-place
self._scaler.unscale_(optimizer)
norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
else:
self._scaler.unscale_(optimizer)
norm = ampscaler_get_grad_norm(parameters)
self._scaler.step(optimizer)
self._scaler.update()
else:
norm = None
return norm
def state_dict(self):
return self._scaler.state_dict()
def load_state_dict(self, state_dict):
self._scaler.load_state_dict(state_dict)
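# --- usage sketch (editor's addition, not part of the repository) ---
# One AMP training step driven by the scaler above. model, optimizer, criterion,
# images and targets are placeholder names, and a CUDA device is assumed because
# torch.cuda.amp.GradScaler is used internally (torch is already imported by this module).
loss_scaler = NativeScalerWithGradNormCount()
optimizer.zero_grad()
with torch.cuda.amp.autocast():
    loss = criterion(model(images), targets)
grad_norm = loss_scaler(loss, optimizer, clip_grad=5.0,
                        parameters=model.parameters(), update_grad=True)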
# Path: save_logits.py
import os
import time
import random
import argparse
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from collections import defaultdict
from timm.utils import accuracy
from my_meter import AverageMeter
from config import get_config
from models import build_model
from data import build_loader
from logger import create_logger
from utils import load_checkpoint, NativeScalerWithGradNormCount
# --------------------------------------------------------
# TinyViT Save Teacher Logits
# Copyright (c) 2022 Microsoft
# Based on the code: Swin Transformer
# (https://github.com/microsoft/swin-transformer)
# Save teacher logits
# --------------------------------------------------------
def parse_option():
parser = argparse.ArgumentParser(
'Swin Transformer training and evaluation script', add_help=False)
parser.add_argument('--cfg', type=str, required=True,
metavar="FILE", help='path to config file', )
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
# easy config modification
parser.add_argument('--batch-size', type=int,
help="batch size for single GPU")
parser.add_argument('--data-path', type=str, help='path to dataset')
parser.add_argument('--pretrained',
help='pretrained weight from checkpoint, could be imagenet22k pretrained weight')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int,
help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--disable_amp', action='store_true',
help='Disable pytorch amp')
parser.add_argument('--output', default='output', type=str, metavar='PATH',
help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
| parser.add_argument('--tag', help='tag of experiment') |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: fw-ai/fireworks_poe_bot
# Path: fireworks_poe_bot/fastapi_poe/base.py
class PoeBot:
# Override these for your bot
async def get_response(
self, query: QueryRequest
) -> AsyncIterable[Union[PartialResponse, ServerSentEvent]]:
"""Override this to return a response to user queries."""
yield self.text_event("hello")
async def get_settings(self, setting: SettingsRequest) -> SettingsResponse:
"""Override this to return non-standard settings."""
return SettingsResponse()
async def on_feedback(self, feedback_request: ReportFeedbackRequest) -> None:
"""Override this to record feedback from the user."""
pass
async def on_error(self, error_request: ReportErrorRequest) -> None:
"""Override this to record errors from the Poe server."""
logger.error(f"Error from Poe server: {error_request}")
# Helpers for generating responses
@staticmethod
def text_event(text: str) -> ServerSentEvent:
return ServerSentEvent(data=json.dumps({"text": text}), event="text")
@staticmethod
def replace_response_event(text: str) -> ServerSentEvent:
return ServerSentEvent(
data=json.dumps({"text": text}), event="replace_response"
)
@staticmethod
def done_event() -> ServerSentEvent:
return ServerSentEvent(data="{}", event="done")
@staticmethod
def suggested_reply_event(text: str) -> ServerSentEvent:
return ServerSentEvent(data=json.dumps({"text": text}), event="suggested_reply")
@staticmethod
def meta_event(
*,
content_type: ContentType = "text/markdown",
refetch_settings: bool = False,
linkify: bool = True,
suggested_replies: bool = True,
) -> ServerSentEvent:
return ServerSentEvent(
data=json.dumps(
{
"content_type": content_type,
"refetch_settings": refetch_settings,
"linkify": linkify,
"suggested_replies": suggested_replies,
}
),
event="meta",
)
@staticmethod
def error_event(
text: Optional[str] = None, *, allow_retry: bool = True
) -> ServerSentEvent:
data: Dict[str, Union[bool, str]] = {"allow_retry": allow_retry}
if text is not None:
data["text"] = text
return ServerSentEvent(data=json.dumps(data), event="error")
# Internal handlers
async def handle_report_feedback(
self, feedback_request: ReportFeedbackRequest
) -> JSONResponse:
await self.on_feedback(feedback_request)
return JSONResponse({})
async def handle_report_error(
self, error_request: ReportErrorRequest
) -> JSONResponse:
await self.on_error(error_request)
return JSONResponse({})
async def handle_settings(self, settings_request: SettingsRequest) -> JSONResponse:
settings = await self.get_settings(settings_request)
return JSONResponse(settings.dict())
async def handle_query(self, query: QueryRequest) -> AsyncIterable[ServerSentEvent]:
try:
async for event in self.get_response(query):
if isinstance(event, ServerSentEvent):
yield event
elif isinstance(event, ErrorResponse):
yield self.error_event(event.text, allow_retry=event.allow_retry)
elif isinstance(event, MetaResponse):
yield self.meta_event(
content_type=event.content_type,
refetch_settings=event.refetch_settings,
linkify=event.linkify,
suggested_replies=event.suggested_replies,
)
elif event.is_suggested_reply:
yield self.suggested_reply_event(event.text)
elif event.is_replace_response:
yield self.replace_response_event(event.text)
else:
yield self.text_event(event.text)
except Exception as e:
logger.exception("Error responding to query")
yield self.error_event(repr(e), allow_retry=False)
yield self.done_event()
# Path: fireworks_poe_bot/fastapi_poe/types.py
class PartialResponse(BaseModel):
"""Representation of a (possibly partial) response from a bot."""
text: str
"""Partial response text.
If the final bot response is "ABC", you may see a sequence
of PartialResponse objects like PartialResponse(text="A"),
PartialResponse(text="B"), PartialResponse(text="C").
"""
raw_response: object = None
"""For debugging, the raw response from the bot."""
full_prompt: Optional[str] = None
"""For debugging, contains the full prompt as sent to the bot."""
request_id: Optional[str] = None
"""May be set to an internal identifier for the request."""
is_suggested_reply: bool = False
"""If true, this is a suggested reply."""
is_replace_response: bool = False
"""If true, this text should completely replace the previous bot text."""
# Path: fireworks_poe_bot/fastapi_poe/types.py
class QueryRequest(BaseRequest):
"""Request parameters for a query request."""
query: List[ProtocolMessage]
user_id: Identifier
conversation_id: Identifier
message_id: Identifier
metadata: Identifier = ""
api_key: str = "<missing>"
access_key: str = "<missing>"
temperature: float = 0.7
skip_system_prompt: bool = False
logit_bias: Dict[str, float] = {}
stop_sequences: List[str] = []
# Path: fireworks_poe_bot/fastapi_poe/types.py
class ReportErrorRequest(BaseRequest):
"""Request parameters for a report_error request."""
message: str
metadata: Dict[str, Any]
# Path: fireworks_poe_bot/fastapi_poe/types.py
class ReportFeedbackRequest(BaseRequest):
"""Request parameters for a report_feedback request."""
message_id: Identifier
user_id: Identifier
conversation_id: Identifier
feedback_type: FeedbackType
# Path: fireworks_poe_bot/fastapi_poe/types.py
class SettingsRequest(BaseRequest):
"""Request parameters for a settings request."""
# Path: fireworks_poe_bot/fastapi_poe/types.py
class SettingsResponse(BaseModel):
context_clear_window_secs: Optional[int] = None # deprecated
allow_user_context_clear: bool = True # deprecated
server_bot_dependencies: Dict[str, int] = Field(default_factory=dict)
allow_attachments: bool = False
introduction_message: str = ""
# Path: fireworks_poe_bot/fastapi_poe/types.py
class ErrorResponse(PartialResponse):
"""Communicate errors from server bots."""
allow_retry: bool = False
# Path: fireworks_poe_bot/plugin.py
@abstractmethod
def log_error(self, payload: Dict[str, Any]):
...
# Path: fireworks_poe_bot/plugin.py
@abstractmethod
def log_info(self, payload: Dict[str, Any]):
...
# Path: fireworks_poe_bot/plugin.py
@abstractmethod
def log_warn(self, payload: Dict[str, Any]):
...
# Path: fireworks_poe_bot/plugin.py
def register_bot_plugin(config_key: str, BotConfigClass: type = ModelConfig):
def decorator(BotPluginClass: type):
BOT_PLUGINS.append(_BotPlugin(
BotPluginClass=BotPluginClass,
BotConfigClass=BotConfigClass,
config_key=config_key,
))
return decorator
# Path: fireworks_poe_bot/config.py
class ModelConfig(BaseModel):
model: str
api_key: str
SERVER_endpoint_account_override: Optional[str] = None
SERVER_endpoint_model_override: Optional[str] = None
@property
def model_fqn(self):
if (
self.SERVER_endpoint_account_override is not None
or self.SERVER_endpoint_model_override is not None
):
_, account, _, model = self.model.split("/")
account = self.SERVER_endpoint_account_override or account
model = self.SERVER_endpoint_model_override or model
return f"accounts/{account}/models/{model}"
else:
return self.model
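# --- usage sketch (editor's addition, not part of the repository) ---
# Shows how model_fqn resolves with and without the endpoint overrides; the model
# name and api_key below are hypothetical placeholders, not real Fireworks resources.
cfg = ModelConfig(model="accounts/fireworks/models/some-model", api_key="sk-placeholder")
assert cfg.model_fqn == "accounts/fireworks/models/some-model"   # no overrides: returned unchanged
cfg = ModelConfig(model="accounts/fireworks/models/some-model", api_key="sk-placeholder",
                  SERVER_endpoint_account_override="my-team")
assert cfg.model_fqn == "accounts/my-team/models/some-model"     # account segment swapped in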
# Path: fireworks_poe_bot/fw_poe_text_bot.py
import copy
import fireworks.client
import time
import io
import base64
import httpx
import traceback
from typing import AsyncIterable, Dict, List, Optional, Union, Any
from .fastapi_poe import PoeBot
from sse_starlette.sse import ServerSentEvent
from .fastapi_poe.types import (
PartialResponse,
QueryRequest,
ReportErrorRequest,
ReportFeedbackRequest,
SettingsRequest,
SettingsResponse,
ErrorResponse,
)
from fireworks.client import ChatCompletion
from fireworks.client.api import ChatCompletionResponseStreamChoice, ChatMessage
from fireworks.client.error import InvalidRequestError
from fireworks_poe_bot.plugin import log_error, log_info, log_warn, register_bot_plugin
from fireworks_poe_bot.config import ModelConfig
from typing import Callable
from itertools import groupby
from PIL import Image
if user_message is not None:
user_message["role"] = "input"
# HACKS: move the image to the instruction message
if isinstance(user_message["content"], list):
content_non_image = [x for x in user_message['content'] if (not isinstance(x, dict)) or x["type"] != "image_url"]
content_image = [x for x in user_message['content'] if isinstance(x, dict) and x["type"] == "image_url"]
if content_image:
new_messages[-1]["content"].append(content_image[0])
user_message["content"] = content_non_image
new_messages.append(user_message)
else:
if user_message is not None:
new_messages.append(user_message)
messages = new_messages
self._log_info(
{
"msg": "Request received",
**query.dict(),
}
)
if self.chat_format != "alpaca":
# The poe servers send us arbitrary lists of messages. We need to do a few things
# to normalize for our chat completion API:
# 1. Ensure that all assistant messages are preceded by a user message
# 2. Merge adjacent messages from the same role
# 3. Ensure that the last message is a user message
# Ensure that all assistant messages are preceded by a user message
for i in range(len(messages) - 1, -1, -1):
if messages[i]["role"] == "assistant" and (
i == 0 or messages[i - 1]["role"] != "user"
):
self._log_warn(
{
"msg": f"Assistant message {messages[i]} not preceded by user message"
}
)
messages.insert(i, {"role": "user", "content": ""})
# Merge adjacent messages from the same role
merged_messages = []
# Now there could be images in the messages, in which case the message content is a list
def merge_messages_groups(
message_group: List[Union[str, List[Dict[str, Any]]]]
) -> Union[str, List[Dict[str, Any]]]:
text = []
images = []
for msg in message_group:
if isinstance(msg, str):
text.append(msg)
elif isinstance(msg, list):
assert msg[0]["type"] == "text"
text.append(msg[0]["text"])
images.extend(msg[1:])
if images:
return [{"type": "text", "text": " ".join(text)}, *images]
return " ".join(text)
for role, group in groupby(messages, key=lambda x: x["role"]):
content = merge_messages_groups([message["content"] for message in group])
merged_messages.append({"role": role, "content": content})
messages = merged_messages
# Ensure last message is a user message
if messages[-1]["role"] != "user":
self._log_warn({"msg": f"Last message {messages[-1]} not a user message"})
messages.append({"role": "user", "content": ""})
additional_args = copy.deepcopy(self.additional_args)
if "stop" in additional_args:
stop_seqs = additional_args["stop"]
additional_args.pop("stop")
else:
stop_seqs = query.stop_sequences[:4]
generated_len = 0
complete_response = ""
async for response in self.completion_async_method(
model=self.model,
messages=messages,
stream=True,
request_timeout=600,
temperature=query.temperature,
stop=stop_seqs,
max_tokens=self.max_tokens,
prompt_truncate_len=self.prompt_truncate_len,
**additional_args,
):
# Step 3: Transform the CompletionStreamResponse into PartialResponse format
for choice in response.choices:
assert isinstance(choice, ChatCompletionResponseStreamChoice)
if choice.delta.content is None:
continue
generated_len += len(choice.delta.content)
complete_response += choice.delta.content
yield PartialResponse(
text=choice.delta.content,
raw_response=response,
request_id=response.id,
)
end_t = time.time()
elapsed_sec = end_t - start_t
self._log_info(
{
"severity": "INFO",
"msg": "Request completed",
"query": query.dict(),
"response": complete_response,
"generated_len": generated_len,
"elapsed_sec": elapsed_sec,
}
)
yield ServerSentEvent(event="done")
return
except Exception as e:
| end_t = time.time() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Fsoft-AIC/LSDM
# Path: atiss/scene_synthesis/datasets/threed_front_scene.py
class Asset:
"""Contains the information for each 3D-FUTURE model."""
super_category: str
category: str
style: str
theme: str
material: str
@property
def label(self):
return self.category
# Path: atiss/scene_synthesis/datasets/threed_front_scene.py
class ModelInfo(object):
"""Contains all the information for all 3D-FUTURE models.
Arguments
---------
model_info_data: list of dictionaries containing the information
regarding the 3D-FUTURE models.
"""
def __init__(self, model_info_data):
self.model_info_data = model_info_data
self._model_info = None
# List to keep track of the different styles, themes
self._styles = []
self._themes = []
self._categories = []
self._super_categories = []
self._materials = []
@property
def model_info(self):
if self._model_info is None:
self._model_info = {}
# Create a dictionary of all models/assets in the dataset
for m in self.model_info_data:
# Keep track of the different styles
if m["style"] not in self._styles and m["style"] is not None:
self._styles.append(m["style"])
# Keep track of the different themes
if m["theme"] not in self._themes and m["theme"] is not None:
self._themes.append(m["theme"])
# Keep track of the different super-categories
if m["super-category"] not in self._super_categories and m["super-category"] is not None:
self._super_categories.append(m["super-category"])
# Keep track of the different categories
if m["category"] not in self._categories and m["category"] is not None:
self._categories.append(m["category"])
# Keep track of the different materials
if m["material"] not in self._materials and m["material"] is not None:
self._materials.append(m["material"])
super_cat = "unknown_super-category"
cat = "unknown_category"
if m["super-category"] is not None:
super_cat = m["super-category"].lower().replace(" / ", "/")
if m["category"] is not None:
cat = m["category"].lower().replace(" / ", "/")
self._model_info[m["model_id"]] = Asset(
super_cat,
cat,
m["style"],
m["theme"],
m["material"]
)
return self._model_info
@property
def styles(self):
return self._styles
@property
def themes(self):
return self._themes
@property
def materials(self):
return self._materials
@property
def categories(self):
return set([s.lower().replace(" / ", "/") for s in self._categories])
@property
def super_categories(self):
return set([
s.lower().replace(" / ", "/")
for s in self._super_categories
])
@classmethod
def from_file(cls, path_to_model_info):
with open(path_to_model_info, "rb") as f:
model_info = json.load(f)
return cls(model_info)
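# --- usage sketch (editor's addition, not part of the repository) ---
# ModelInfo.from_file parses the 3D-FUTURE model_info.json and exposes a dict of
# Asset entries keyed by model id; the path and key below are placeholders.
info = ModelInfo.from_file("/path/to/model_info.json")
asset = info.model_info["<some-model-id>"]
print(asset.super_category, asset.label, asset.style)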
# Path: atiss/scene_synthesis/datasets/threed_front_scene.py
class Room(BaseScene):
def __init__(
self, scene_id, scene_type, bboxes, extras, json_path,
path_to_room_masks_dir=None
):
super().__init__(scene_id, scene_type, bboxes)
self.json_path = json_path
self.extras = extras
self.uid = "_".join([self.json_path, scene_id])
self.path_to_room_masks_dir = path_to_room_masks_dir
if path_to_room_masks_dir is not None:
self.path_to_room_mask = os.path.join(
self.path_to_room_masks_dir, self.uid, "room_mask.png"
)
else:
self.path_to_room_mask = None
@property
def floor(self):
return [ei for ei in self.extras if ei.model_type == "Floor"][0]
@property
@lru_cache(maxsize=512)
def bbox(self):
corners = np.empty((0, 3))
for f in self.bboxes:
corners = np.vstack([corners, f.corners()])
return np.min(corners, axis=0), np.max(corners, axis=0)
@cached_property
def bboxes_centroid(self):
a, b = self.bbox
return (a+b)/2
@property
def furniture_in_room(self):
return [f.label for f in self.bboxes]
@property
def floor_plan(self):
def cat_mesh(m1, m2):
v1, f1 = m1
v2, f2 = m2
v = np.vstack([v1, v2])
f = np.vstack([f1, f2 + len(v1)])
return v, f
# Compute the full floor plan
vertices, faces = reduce(
cat_mesh,
((ei.xyz, ei.faces) for ei in self.extras if ei.model_type == "Floor")
)
return np.copy(vertices), np.copy(faces)
@cached_property
def floor_plan_bbox(self):
vertices, faces = self.floor_plan
return np.min(vertices, axis=0), np.max(vertices, axis=0)
@cached_property
def floor_plan_centroid(self):
a, b = self.floor_plan_bbox
return (a+b)/2
@cached_property
def centroid(self):
return self.floor_plan_centroid
@property
def count_furniture_in_room(self):
return Counter(self.furniture_in_room)
@property
def room_mask(self):
return self.room_mask_rotated(0)
def room_mask_rotated(self, angle=0):
# The angle is in rad
im = Image.open(self.path_to_room_mask).convert("RGB")
# Rotate the room mask image by the given angle using bicubic resampling
im = im.rotate(angle * 180 / np.pi, resample=Image.BICUBIC)
return np.asarray(im).astype(np.float32) / np.float32(255)
def category_counts(self, class_labels):
"""List of category counts in the room
"""
print(class_labels)
if "start" in class_labels and "end" in class_labels:
class_labels = class_labels[:-2]
category_counts = [0]*len(class_labels)
for di in self.furniture_in_room:
category_counts[class_labels.index(di)] += 1
return category_counts
def ordered_bboxes_with_centroid(self):
centroids = np.array([f.centroid(-self.centroid) for f in self.bboxes])
ordering = np.lexsort(centroids.T)
ordered_bboxes = [self.bboxes[i] for i in ordering]
return ordered_bboxes
def ordered_bboxes_with_class_labels(self, all_labels):
centroids = np.array([f.centroid(-self.centroid) for f in self.bboxes])
int_labels = np.array(
[[f.int_label(all_labels)] for f in self.bboxes]
)
ordering = np.lexsort(np.hstack([centroids, int_labels]).T)
ordered_bboxes = [self.bboxes[i] for i in ordering]
return ordered_bboxes
def ordered_bboxes_with_class_frequencies(self, class_order):
centroids = np.array([f.centroid(-self.centroid) for f in self.bboxes])
label_order = np.array([
[class_order[f.label]] for f in self.bboxes
])
ordering = np.lexsort(np.hstack([centroids, label_order]).T)
ordered_bboxes = [self.bboxes[i] for i in ordering[::-1]]
return ordered_bboxes
def furniture_renderables(
self,
colors=(0.5, 0.5, 0.5),
with_bbox_corners=False,
with_origin=False,
with_bboxes=False,
with_objects_offset=False,
with_floor_plan_offset=False,
with_floor_plan=False,
with_texture=False
):
if with_objects_offset:
offset = -self.bboxes_centroid
elif with_floor_plan_offset:
offset = -self.floor_plan_centroid
else:
offset = [[0, 0, 0]]
renderables = [
f.mesh_renderable(
colors=colors, offset=offset, with_texture=with_texture
)
for f in self.bboxes
]
if with_origin:
renderables += [f.origin_renderable(offset) for f in self.bboxes]
if with_bbox_corners:
for f in self.bboxes:
renderables += [f.bbox_corners_renderable(offset=offset)]
if with_bboxes:
for f in self.bboxes:
renderables += [f.bbox_renderable(offset=offset)]
if with_floor_plan:
vertices, faces = self.floor_plan
vertices = vertices + offset
renderables += [
Mesh.from_faces(vertices, faces, colors=(0.8, 0.8, 0.8, 0.6))
]
return renderables
def show(
self,
behaviours=[LightToCamera(), SnapshotOnKey()],
with_bbox_corners=False,
with_bboxes=False,
with_objects_offset=False,
with_floor_plan_offset=False,
with_floor_plan=False,
background=(1.0, 1.0, 1.0, 1.0),
camera_target=(0, 0, 0),
camera_position=(-2, -2, -2),
up_vector=(0, 0, 1),
window_size=(512, 512)
):
renderables = self.furniture_renderables(
with_bbox_corners=with_bbox_corners,
with_bboxes=with_bboxes,
with_objects_offset=with_objects_offset,
with_floor_plan_offset=with_floor_plan_offset,
with_floor_plan=with_floor_plan
)
show(
renderables, behaviours=behaviours,
size=window_size, camera_position=camera_position,
camera_target=camera_target, up_vector=up_vector,
background=background
)
def augment_room(self, objects_dataset):
bboxes = self.bboxes
# Randomly pick an asset to be augmented
bi = np.random.choice(self.bboxes)
query_label = bi.label
query_size = bi.size + np.random.normal(0, 0.02)
# Retrieve the new asset based on the size of the picked asset
furniture = objects_dataset.get_closest_furniture_to_box(
query_label, query_size
)
bi_retrieved = bi.copy_from_other_model(furniture)
new_bboxes = [
box for box in bboxes if not box == bi
] + [bi_retrieved]
return Room(
scene_id=self.scene_id + "_augm",
scene_type=self.scene_type,
bboxes=new_bboxes,
extras=self.extras,
json_path=self.json_path,
path_to_room_masks_dir=self.path_to_room_masks_dir
)
# Path: atiss/scene_synthesis/datasets/threed_front_scene.py
class ThreedFutureModel(BaseThreedFutureModel):
def __init__(
self,
model_uid,
model_jid,
model_info,
position,
rotation,
scale,
path_to_models
):
super().__init__(model_uid, model_jid, position, rotation, scale)
self.model_info = model_info
self.path_to_models = path_to_models
self._label = None
@property
def raw_model_path(self):
return os.path.join(
self.path_to_models,
self.model_jid,
"raw_model.obj"
)
@property
def texture_image_path(self):
return os.path.join(
self.path_to_models,
self.model_jid,
"texture.png"
)
@property
def path_to_bbox_vertices(self):
return os.path.join(
self.path_to_models,
self.model_jid,
"bbox_vertices.npy"
)
def raw_model(self):
try:
return trimesh.load(
self.raw_model_path,
process=False,
force="mesh",
skip_materials=True,
skip_texture=True
)
except:
import pdb
pdb.set_trace()
print("Loading model failed", flush=True)
print(self.raw_model_path, flush=True)
raise
def raw_model_transformed(self, offset=[[0, 0, 0]]):
model = self.raw_model()
faces = np.array(model.faces)
vertices = self._transform(np.array(model.vertices)) + offset
return trimesh.Trimesh(vertices, faces)
def centroid(self, offset=[[0, 0, 0]]):
return self.corners(offset).mean(axis=0)
@cached_property
def size(self):
corners = self.corners()
return np.array([
np.sqrt(np.sum((corners[4]-corners[0])**2))/2,
np.sqrt(np.sum((corners[2]-corners[0])**2))/2,
np.sqrt(np.sum((corners[1]-corners[0])**2))/2
])
def bottom_center(self, offset=[[0, 0, 0]]):
centroid = self.centroid(offset)
size = self.size
return np.array([centroid[0], centroid[1]-size[1], centroid[2]])
@cached_property
def bottom_size(self):
return self.size * [1, 2, 1]
@cached_property
def z_angle(self):
# See BaseThreedFutureModel._transform for the origin of the following
# code.
ref = [0, 0, 1]
axis = np.cross(ref, self.rotation[1:])
theta = np.arccos(np.dot(ref, self.rotation[1:]))*2
if np.sum(axis) == 0 or np.isnan(theta):
return 0
assert np.dot(axis, [1, 0, 1]) == 0
assert 0 <= theta <= 2*np.pi
if theta >= np.pi:
theta = theta - 2*np.pi
return np.sign(axis[1]) * theta
@property
def label(self):
if self._label is None:
self._label = self.model_info.label
return self._label
@label.setter
def label(self, _label):
self._label = _label
def corners(self, offset=[[0, 0, 0]]):
try:
bbox_vertices = np.load(self.path_to_bbox_vertices, mmap_mode="r")
except:
bbox_vertices = np.array(self.raw_model().bounding_box.vertices)
np.save(self.path_to_bbox_vertices, bbox_vertices)
c = self._transform(bbox_vertices)
return c + offset
def origin_renderable(self, offset=[[0, 0, 0]]):
corners = self.corners(offset)
return Lines(
[
corners[0], corners[4],
corners[0], corners[2],
corners[0], corners[1]
],
colors=np.array([
[1.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0]
]),
width=0.02
)
def bbox_corners_renderable(
self, sizes=0.1, colors=(1, 0, 0), offset=[[0, 0, 0]]
):
return Spherecloud(self.corners(offset), sizes=sizes, colors=colors)
def bbox_renderable(
self, colors=(0.00392157, 0., 0.40392157, 1.), offset=[[0, 0, 0]]
):
alpha = np.array(self.size)[None]
epsilon = np.ones((1, 2)) * 0.1
translation = np.array(self.centroid(offset))[None]
R = np.zeros((1, 3, 3))
theta = np.array(self.z_angle)
R[:, 0, 0] = np.cos(theta)
R[:, 0, 2] = -np.sin(theta)
R[:, 2, 0] = np.sin(theta)
R[:, 2, 2] = np.cos(theta)
R[:, 1, 1] = 1.
return Mesh.from_superquadrics(alpha, epsilon, translation, R, colors)
def show(
self,
behaviours=[LightToCamera()],
with_bbox_corners=False,
offset=[[0, 0, 0]]
):
renderables = self.mesh_renderable(offset=offset)
if with_bbox_corners:
renderables += [self.bbox_corners_renderable(offset=offset)]
show(renderables, behaviours=behaviours)
def one_hot_label(self, all_labels):
return np.eye(len(all_labels))[self.int_label(all_labels)]
def int_label(self, all_labels):
return all_labels.index(self.label)
def copy_from_other_model(self, other_model):
model = ThreedFutureModel(
model_uid=other_model.model_uid,
model_jid=other_model.model_jid,
model_info=other_model.model_info,
position=self.position,
rotation=self.rotation,
scale=other_model.scale,
path_to_models=self.path_to_models
)
model.label = self.label
return model
# Path: atiss/scene_synthesis/datasets/threed_front_scene.py
class ThreedFutureExtra(BaseThreedFutureModel):
def __init__(
self,
model_uid,
model_jid,
xyz,
faces,
model_type,
position,
rotation,
scale
):
super().__init__(model_uid, model_jid, position, rotation, scale)
self.xyz = xyz
self.faces = faces
self.model_type = model_type
def raw_model_transformed(self, offset=[[0, 0, 0]]):
vertices = self._transform(np.array(self.xyz)) + offset
faces = np.array(self.faces)
return trimesh.Trimesh(vertices, faces)
def show(
self, behaviours=[LightToCamera(), SnapshotOnKey()], offset=[[0, 0, 0]]
):
renderables = self.mesh_renderable(offset=offset)
show(renderables, behaviours)
# Path: atiss/scene_synthesis/datasets/utils.py
from collections import defaultdict
from .threed_front_scene import Asset, ModelInfo, Room, ThreedFutureModel, \
ThreedFutureExtra
import numpy as np
import json
import os
import pickle
# windows etc.
meshes_in_scene = defaultdict()
for mm in data["mesh"]:
meshes_in_scene[mm["uid"]] = dict(
mesh_uid=mm["uid"],
mesh_jid=mm["jid"],
mesh_xyz=np.asarray(mm["xyz"]).reshape(-1, 3),
mesh_faces=np.asarray(mm["faces"]).reshape(-1, 3),
mesh_type=mm["type"]
)
# Parse the rooms of the scene
scene = data["scene"]
# Keep track of the parsed rooms
rooms = []
for rr in scene["room"]:
# Keep track of the furniture in the room
furniture_in_room = []
# Keep track of the extra meshes in the room
extra_meshes_in_room = []
# Flag to keep track of invalid scenes
is_valid_scene = True
for cc in rr["children"]:
if cc["ref"] in furniture_in_scene:
tf = furniture_in_scene[cc["ref"]]
# If scale is very small/big ignore this scene
if any(si < 1e-5 for si in cc["scale"]):
is_valid_scene = False
break
if any(si > 5 for si in cc["scale"]):
is_valid_scene = False
break
furniture_in_room.append(ThreedFutureModel(
tf["model_uid"],
tf["model_jid"],
tf["model_info"],
cc["pos"],
cc["rot"],
cc["scale"],
path_to_models
))
elif cc["ref"] in meshes_in_scene:
mf = meshes_in_scene[cc["ref"]]
extra_meshes_in_room.append(ThreedFutureExtra(
mf["mesh_uid"],
mf["mesh_jid"],
mf["mesh_xyz"],
mf["mesh_faces"],
mf["mesh_type"],
cc["pos"],
cc["rot"],
cc["scale"]
))
else:
continue
if len(furniture_in_room) > 1 and is_valid_scene:
# Check whether a room with the same instanceid has
# already been added to the list of rooms
if rr["instanceid"] not in unique_room_ids:
unique_room_ids.add(rr["instanceid"])
# Add to the list
rooms.append(Room(
rr["instanceid"], # scene_id
rr["type"].lower(), # scene_type
furniture_in_room, # bounding boxes
extra_meshes_in_room, # extras e.g. walls
m.split("/")[-1].split(".")[0], # json_path
path_to_room_masks_dir
))
scenes.append(rooms)
s = "{:5d} / {:5d}".format(i, len(path_to_scene_layouts))
print(s, flush=True, end="\b"*len(s))
print()
scenes = sum(scenes, [])
pickle.dump(scenes, open("/tmp/threed_front.pkl", "wb"))
return scenes
def parse_threed_future_models(
dataset_directory, path_to_models, path_to_model_info
):
if os.getenv("PATH_TO_3D_FUTURE_OBJECTS"):
furnitures = pickle.load(
open(os.getenv("PATH_TO_3D_FUTURE_OBJECTS"), "rb")
)
else:
# Parse the model info
mf = ModelInfo.from_file(path_to_model_info)
model_info = mf.model_info
path_to_scene_layouts = [
os.path.join(dataset_directory, f)
for f in sorted(os.listdir(dataset_directory))
if f.endswith(".json")
]
# List to keep track of all available furniture in the dataset
furnitures = []
unique_furniture_ids = set()
# Start parsing the dataset
print("Loading dataset ", end="")
for i, m in enumerate(path_to_scene_layouts):
with open(m) as f:
data = json.load(f)
# Parse the furniture of the scene
furniture_in_scene = defaultdict()
for ff in data["furniture"]:
if "valid" in ff and ff["valid"]:
furniture_in_scene[ff["uid"]] = dict(
model_uid=ff["uid"],
model_jid=ff["jid"],
model_info=model_info[ff["jid"]]
)
# Parse the rooms of the scene
scene = data["scene"]
for rr in scene["room"]:
# Flag to keep track of invalid scenes
| is_valid_scene = True |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Harvard-Ophthalmology-AI-Lab/FairSeg
# Path: SAMed/segment_anything/modeling/image_encoder.py
class ImageEncoderViT(nn.Module):
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: Tuple[int, ...] = (),
) -> None:
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
global_attn_indexes (list): Indexes for blocks using global attention.
"""
super().__init__()
self.img_size = img_size
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
self.pos_embed: Optional[nn.Parameter] = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(
torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
)
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
),
LayerNorm2d(out_chans),
nn.Conv2d(
out_chans,
out_chans,
kernel_size=3,
padding=1,
bias=False,
),
LayerNorm2d(out_chans),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.patch_embed(x) # pre embed: [1, 3, 1024, 1024], post embed: [1, 64, 64, 768]
if self.pos_embed is not None:
x = x + self.pos_embed
for blk in self.blocks:
x = blk(x)
x = self.neck(x.permute(0, 3, 1, 2)) # [b, c, h, w], [1, 256, 64, 64]
return x
# Path: SAMed/segment_anything/modeling/mask_decoder.py
class MaskDecoder(nn.Module):
def __init__(
self,
*,
transformer_dim: int,
transformer: nn.Module,
num_multimask_outputs: int = 3,
activation: Type[nn.Module] = nn.GELU,
iou_head_depth: int = 3,
iou_head_hidden_dim: int = 256,
) -> None:
"""
Predicts masks given an image and prompt embeddings, using a
transformer architecture.
Arguments:
transformer_dim (int): the channel dimension of the transformer
transformer (nn.Module): the transformer used to predict masks
num_multimask_outputs (int): the number of masks to predict
when disambiguating masks
activation (nn.Module): the type of activation to use when
upscaling masks
iou_head_depth (int): the depth of the MLP used to predict
mask quality
iou_head_hidden_dim (int): the hidden dimension of the MLP
used to predict mask quality
"""
super().__init__()
self.transformer_dim = transformer_dim
self.transformer = transformer
self.num_multimask_outputs = num_multimask_outputs
self.iou_token = nn.Embedding(1, transformer_dim)
self.num_mask_tokens = num_multimask_outputs + 1
self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
self.output_upscaling = nn.Sequential(
nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
LayerNorm2d(transformer_dim // 4),
activation(),
nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
activation(),
)
self.output_hypernetworks_mlps = nn.ModuleList(
[
MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
for i in range(self.num_mask_tokens)
]
)
self.iou_prediction_head = MLP(
transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
)
def forward(
self,
image_embeddings: torch.Tensor,
image_pe: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
multimask_output: bool,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Predict masks given image and prompt embeddings.
Arguments:
image_embeddings (torch.Tensor): the embeddings from the image encoder
image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
multimask_output (bool): Whether to return multiple masks or a single
mask.
Returns:
torch.Tensor: batched predicted masks
torch.Tensor: batched predictions of mask quality
"""
masks, iou_pred = self.predict_masks(
image_embeddings=image_embeddings,
image_pe=image_pe,
sparse_prompt_embeddings=sparse_prompt_embeddings,
dense_prompt_embeddings=dense_prompt_embeddings,
)
# Select the correct mask or masks for output
# if multimask_output:
# mask_slice = slice(1, None)
# else:
# mask_slice = slice(0, 1)
# masks = masks[:, mask_slice, :, :]
# iou_pred = iou_pred[:, mask_slice]
# Prepare output
return masks, iou_pred
def predict_masks(
self,
image_embeddings: torch.Tensor,
image_pe: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Predicts masks. See 'forward' for more details."""
# Concatenate output tokens
output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
# Expand per-image data in batch direction to be per-mask
src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
src = src + dense_prompt_embeddings
pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
b, c, h, w = src.shape
# Run the transformer
hs, src = self.transformer(src, pos_src, tokens)
iou_token_out = hs[:, 0, :]
mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
# Upscale mask embeddings and predict masks using the mask tokens
src = src.transpose(1, 2).view(b, c, h, w)
upscaled_embedding = self.output_upscaling(src)
hyper_in_list: List[torch.Tensor] = []
for i in range(self.num_mask_tokens):
hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
hyper_in = torch.stack(hyper_in_list, dim=1) # [b, c, token_num]
b, c, h, w = upscaled_embedding.shape # [h, token_num, h, w]
masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) # [1, 4, 256, 256], 256 = 4 * 64, the size of image embeddings
# Generate mask quality predictions
iou_pred = self.iou_prediction_head(iou_token_out)
return masks, iou_pred
# Path: SAMed/segment_anything/modeling/prompt_encoder.py
class PromptEncoder(nn.Module):
def __init__(
self,
embed_dim: int,
image_embedding_size: Tuple[int, int],
input_image_size: Tuple[int, int],
mask_in_chans: int,
activation: Type[nn.Module] = nn.GELU,
) -> None:
"""
Encodes prompts for input to SAM's mask decoder.
Arguments:
embed_dim (int): The prompts' embedding dimension
image_embedding_size (tuple(int, int)): The spatial size of the
image embedding, as (H, W).
input_image_size (int): The padded size of the image as input
to the image encoder, as (H, W).
mask_in_chans (int): The number of hidden channels used for
encoding input masks.
activation (nn.Module): The activation to use when encoding
input masks.
"""
super().__init__()
self.embed_dim = embed_dim
self.input_image_size = input_image_size
self.image_embedding_size = image_embedding_size
self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
self.point_embeddings = nn.ModuleList(point_embeddings)
self.not_a_point_embed = nn.Embedding(1, embed_dim)
self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])
self.mask_downscaling = nn.Sequential(
nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
LayerNorm2d(mask_in_chans // 4),
activation(),
nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
LayerNorm2d(mask_in_chans),
activation(),
nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
) # downsample to 1/4
self.no_mask_embed = nn.Embedding(1, embed_dim)
def get_dense_pe(self) -> torch.Tensor:
"""
Returns the positional encoding used to encode point prompts,
applied to a dense set of points the shape of the image encoding.
Returns:
torch.Tensor: Positional encoding with shape
1x(embed_dim)x(embedding_h)x(embedding_w)
"""
return self.pe_layer(self.image_embedding_size).unsqueeze(0)
def _embed_points(
self,
points: torch.Tensor,
labels: torch.Tensor,
pad: bool,
) -> torch.Tensor:
"""Embeds point prompts."""
points = points + 0.5 # Shift to center of pixel
if pad:
padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
points = torch.cat([points, padding_point], dim=1)
labels = torch.cat([labels, padding_label], dim=1)
point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
point_embedding[labels == -1] = 0.0
point_embedding[labels == -1] += self.not_a_point_embed.weight
point_embedding[labels == 0] += self.point_embeddings[0].weight
point_embedding[labels == 1] += self.point_embeddings[1].weight
return point_embedding
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
"""Embeds box prompts."""
boxes = boxes + 0.5 # Shift to center of pixel
coords = boxes.reshape(-1, 2, 2)
corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
corner_embedding[:, 0, :] += self.point_embeddings[2].weight
corner_embedding[:, 1, :] += self.point_embeddings[3].weight
return corner_embedding
def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
"""Embeds mask inputs."""
mask_embedding = self.mask_downscaling(masks)
return mask_embedding
def _get_batch_size(
self,
points: Optional[Tuple[torch.Tensor, torch.Tensor]],
boxes: Optional[torch.Tensor],
masks: Optional[torch.Tensor],
) -> int:
"""
Gets the batch size of the output given the batch size of the input prompts.
"""
if points is not None:
return points[0].shape[0]
elif boxes is not None:
return boxes.shape[0]
elif masks is not None:
return masks.shape[0]
else:
return 1
def _get_device(self) -> torch.device:
return self.point_embeddings[0].weight.device
def forward(
self,
points: Optional[Tuple[torch.Tensor, torch.Tensor]],
boxes: Optional[torch.Tensor],
masks: Optional[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Embeds different types of prompts, returning both sparse and dense
embeddings.
Arguments:
points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
and labels to embed.
boxes (torch.Tensor or none): boxes to embed
masks (torch.Tensor or none): masks to embed
Returns:
torch.Tensor: sparse embeddings for the points and boxes, with shape
BxNx(embed_dim), where N is determined by the number of input points
and boxes.
torch.Tensor: dense embeddings for the masks, in the shape
Bx(embed_dim)x(embed_H)x(embed_W)
"""
bs = self._get_batch_size(points, boxes, masks)
sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())
if points is not None:
coords, labels = points
point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
if boxes is not None:
box_embeddings = self._embed_boxes(boxes)
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
if masks is not None:
dense_embeddings = self._embed_masks(masks)
else:
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
)
return sparse_embeddings, dense_embeddings
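# --- Editor's note: the following is a hypothetical usage sketch, not part of the SAM source quoted above. ---
# The PromptEncoder maps point/box/mask prompts to sparse and dense embeddings; a minimal call
# with a single foreground point (all sizes are assumed purely for illustration) could look like:
#
#     prompt_encoder = PromptEncoder(
#         embed_dim=256,
#         image_embedding_size=(64, 64),
#         input_image_size=(1024, 1024),
#         mask_in_chans=16,
#     )
#     coords = torch.tensor([[[512.0, 512.0]]])   # (B=1, N=1, 2) in input-image pixel space
#     labels = torch.tensor([[1]])                # 1 = foreground point, 0 = background point
#     sparse, dense = prompt_encoder(points=(coords, labels), boxes=None, masks=None)
#     # sparse: (1, N+1, embed_dim) -- a padding point is appended when no boxes are given
#     # dense:  (1, embed_dim, 64, 64) -- broadcast from the learned no_mask embedding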
# Path: SAMed/segment_anything/modeling/sam.py
import torch
from torch import nn
from torch.nn import functional as F
from icecream import ic
from typing import Any, Dict, List, Tuple
from .image_encoder import ImageEncoderViT
from .mask_decoder import MaskDecoder
from .prompt_encoder import PromptEncoder
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class Sam(nn.Module):
mask_threshold: float = 0.0
image_format: str = "RGB"
def __init__(
self,
image_encoder: ImageEncoderViT,
prompt_encoder: PromptEncoder,
mask_decoder: MaskDecoder,
| pixel_mean: List[float] = [123.675, 116.28, 103.53], |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: microsoft/PLEX
# Path: PLEX/models/heads/distributions.py
class GaussianHead(nn.Module):
def __init__(self, input_dim, output_dim, std_bounds,
hidden_dim=None, squash=False):
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.std_bounds = std_bounds
self.squash = squash
self.mean_head = _head(input_dim, output_dim, hidden_dim=hidden_dim)
self.std_head = _head(input_dim, output_dim, hidden_dim=hidden_dim)
def forward(self, x):
mean = self.mean_head(x)
std = _rescale(self.std_head(x), *self.std_bounds)
dist = D.Normal(loc=mean, scale=std)
# dist = D.Independent(dist, 1) # diagonal
if self.squash:
dist = TanhWrappedDistribution(dist)
return dist
# Path: PLEX/models/heads/distributions.py
class GaussianMixtureHead(nn.Module):
def __init__(self, num_components, input_dim, output_dim, std_bounds,
hidden_dim=None, squash=False):
super().__init__()
self.num_components = num_components
self.input_dim = input_dim
self.output_dim = output_dim
self.squash = squash
self.std_bounds = std_bounds
self.mean_heads = nn.ModuleList([
_head(input_dim, output_dim, hidden_dim=hidden_dim)
for _ in range(num_components)
])
self.std_heads = nn.ModuleList([
_head(input_dim, output_dim, hidden_dim=hidden_dim)
for _ in range(num_components)
])
self.logits_head = _head(input_dim, num_components, hidden_dim=hidden_dim)
def forward(self, x):
# mixture dim will come right after other batch dims
batch_shape = tuple(x.shape[:-1])
mixture_dim = len(batch_shape)
# unnormalized logits to categorical distribution for mixing the modes
logits = self.logits_head(x)
mixture = D.Categorical(logits=logits)
means = torch.stack([head(x) for head in self.mean_heads], dim=mixture_dim)
stds = _rescale(
torch.stack([head(x) for head in self.std_heads], dim=mixture_dim),
*self.std_bounds
)
dists = D.Normal(loc=means, scale=stds)
dists = D.Independent(dists, 1) # diagonal
dist = D.MixtureSameFamily(mixture_distribution=mixture, component_distribution=dists)
if self.squash:
dist = TanhWrappedDistribution(dist)
return dist
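# --- Editor's note: the following is a hypothetical usage sketch, not part of the PLEX source quoted above;
# all dimensions and the std bounds are assumed purely for illustration. ---
import torch
from PLEX.models.heads.distributions import GaussianMixtureHead

head = GaussianMixtureHead(num_components=5, input_dim=128, output_dim=7,
                           std_bounds=(1e-4, 1.0))
features = torch.randn(32, 128)       # batch of 32 feature vectors
dist = head(features)                 # MixtureSameFamily over 7-dimensional actions
actions = dist.sample()               # shape (32, 7)
log_probs = dist.log_prob(actions)    # shape (32,)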
# Path: PLEX/models/encoders/vision.py
class R3M_Module(Module):
def __init__(self, R3M_obj):
super().__init__()
self.R3M_obj = R3M_obj
self.bn = nn.BatchNorm1d(self.R3M_obj.outdim)
def forward(self, x, **kwargs):
# "Unprocess" images so that they are in [0, 255] and upsample them to 224x224.
x *= 255
x = x.int()
if (x.shape[-1] != 224 or x.shape[-2] != 224):
preprocess = nn.Sequential(
transforms.Resize(224)
)
x = preprocess(x)
x = self.R3M_obj.forward(x, **kwargs)
x = self.bn(x)
return x
def output_shape(self, input_shape=None):
# The return dim of a BN layer is the same is its input dim (R3M's output dim)
return [self.R3M_obj.outdim]
# Path: PLEX/models/trajectory_models/model.py
import torch
import torch.nn as nn
import torch.distributions as D
import math
import torchvision
import os
import PLEX.util.globals as globals
import robomimic.utils.obs_utils as ObsUtils
from robomimic.models.base_nets import SpatialSoftmax, SpatialMeanPool, Module
from robomimic.models.obs_nets import obs_encoder_factory, ObservationEncoder
from torchvision.models.resnet import BasicBlock, Bottleneck
from PLEX.models.heads.distributions import GaussianHead, GaussianMixtureHead
from PLEX.models.encoders.vision import R3M_Module
from r3m.models.models_r3m import R3M
from r3m import load_r3m_from_path
from r3m import load_r3m
if image_encoder_tune_style == 'all':
tunables.append(self.image_encoder)
else:
for obs_net in self.image_encoder.obs_nets.values():
if isinstance(obs_net, R3M_Module):
# Batch normalization layer tuning
tunables.append(obs_net.bn)
if image_encoder_tune_style == 'fc':
# Nothing to do -- this model doesn't have an fc layer at the end
# But remember that the combiners and the batch normalization layer have already been added to the tunables!
pass
elif image_encoder_tune_style.startswith('last'):
# Last n blocks of ResNet
n = int(image_encoder_tune_style[4:])
assert n >= 0
if n > 0:
blocks = [m for m in obs_net.R3M_obj.convnet.modules() if (isinstance(m, torchvision.models.resnet.BasicBlock) or isinstance(m, torchvision.models.resnet.Bottleneck))]
assert len(blocks) >= n
tunables.extend(blocks[-n:])
else:
raise ValueError(f'Invalid image_encoder_tune_style: {image_encoder_tune_style}')
else: # Then it's Robomimic's encoder.
# Add last (fully-connected) layer
fc_layer = obs_net.nets[-1]
if fc_layer is not None and not isinstance(fc_layer, R3M):
assert isinstance(fc_layer, nn.Linear)
tunables.append(fc_layer)
if image_encoder_tune_style == 'fc':
# We already added the last (fc) layer
pass
elif image_encoder_tune_style.startswith('last'):
# Spatial softmax layer
last_layer = obs_net.nets[1]
if last_layer is not None and not isinstance(last_layer, R3M):
assert isinstance(last_layer, SpatialSoftmax) or isinstance(last_layer, SpatialMeanPool)
tunables.append(last_layer)
# Last n blocks of ResNet
convnet = obs_net.nets[0]
n = int(image_encoder_tune_style[4:])
assert n >= 0
if n > 0:
blocks = [m for m in convnet.modules() if (isinstance(m, BasicBlock) or isinstance(m, torchvision.models.resnet.BasicBlock) or isinstance(m, torchvision.models.resnet.Bottleneck))]
assert len(blocks) >= n
tunables.extend(blocks[-n:])
else:
raise ValueError(f'Invalid image_encoder_tune_style: {image_encoder_tune_style}')
else:
tunables.append(self.state_encoder)
return tunables
def set_requires_grad(self, **kwargs):
# Start by disabling gradients for all parameters
for p in self.parameters():
p.requires_grad = False
# Selectively enable
for x in self._get_tunables(**kwargs):
if isinstance(x, nn.Parameter):
x.requires_grad = True
elif isinstance(x, nn.Module):
for p in x.parameters():
p.requires_grad = True
def _embed_helper(self, value, name, batch_dims):
encoder = getattr(self, f'{name}_encoder')
extra_conditions = (name not in self.modalities_to_mask)
if value is not None and extra_conditions:
return encoder(value)
elif self.impute_style in {'trainable'}:
return torch.tile(getattr(self, f'missing_{name}_embedding'),
(*batch_dims, 1))
elif self.impute_style == 'zero-embedding':
zeros = torch.zeros(self.hidden_dim)
return torch.tile(zeros, (*batch_dims, 1))
else:
raise NotImplementedError
def embed_return(self, rtg, batch_dims):
return self._embed_helper(rtg, 'return', batch_dims)
def embed_proprio(self, proprio, batch_dims):
return self._embed_helper(proprio, 'proprio', batch_dims)
def embed_action(self, action, batch_dims):
return self._embed_helper(action, 'action', batch_dims)
def embed_observations(self, obs, proprios, batch_dims):
if not globals.full_state_mode:
cams2images = obs
for cam in cams2images.keys():
c, h, w = self.obs_dims
cams2images[cam] = cams2images[cam].reshape(-1, c, h, w)
img_embeddings = self.image_encoder(cams2images)
assert img_embeddings.ndim == 2
img_embed_dim = img_embeddings.shape[1]
img_embeddings = img_embeddings.reshape(*batch_dims, img_embed_dim)
prop_embeddings = self.embed_proprio(proprios, batch_dims)
return self.obs_combiner(torch.cat([img_embeddings, prop_embeddings], dim=-1))
else:
# Ignore proprios even if they are present.
state = obs
return self.state_encoder(state)
def embed_image_observations(self, cams2images, batch_dims):
for cam in cams2images.keys():
c, h, w = self.obs_dims
cams2images[cam] = cams2images[cam].reshape(-1, c, h, w)
img_embeddings = self.image_encoder(cams2images)
assert img_embeddings.ndim == 2
img_embed_dim = img_embeddings.shape[1]
img_embeddings = img_embeddings.reshape(*batch_dims, img_embed_dim)
return self.image_obs_combiner(img_embeddings)
def embed_context(self, context, batch_dims):
return self.embed_observations(context, None, [batch_dims[0]])
| def forward(self, context, obs, proprios, actions, rewards, returns_to_go, timesteps, mask, **kwargs): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mitre/arlin
# Path: arlin/dataset/xrl_dataset.py
class XRLDataset:
"""Class to store experiences from running a policy in an environment."""
def __init__(
self,
environment: gym.Env,
collector: BaseDataCollector = RandomDataCollector,
seed: int = 12345,
):
"""Initialize an XRLDataset.
Args:
environment (gym.Env): Environment to run the policy in.
collector (BaseDataCollector, optional): Collector we want to use to collect
our data. Defaults to RandomDataCollector.
seed (int, optional): Seed for episode creation. Defaults to 12345.
"""
self.env = environment
self.collector = collector
self.seed = seed
self.num_datapoints = 0
self.analyzed = False
for field in dataclasses.fields(self.collector.datapoint_cls):
if not hasattr(self, field.name):
setattr(self, field.name, np.array([], dtype=np.float64))
def __len__(self) -> int:
"""Number of transitions in the dataset.
Returns:
int: Number of transitions in the dataset
"""
return self.num_datapoints
def fill(self, num_datapoints: int = 50000, randomness: float = 0.0) -> None:
"""Add transitions to this dataset.
Args:
num_datapoints (int, optional): Number of datapoints to add.
Defaults to 50000.
randomness (float, optional): How much randomness do we want when taking
actions. Defaults to 0.0.
"""
logging.info(f"Collecting {num_datapoints} datapoints.")
collected_datapoints = 0
num_episodes = 0
datapoint_list = []
self._episode_lens = []
trunc_count = 0
while collected_datapoints < num_datapoints:
datapoints, trunc = self._collect_episode(
seed=self.seed + num_episodes + trunc_count, randomness=randomness
)
if trunc:
logging.debug("\tSkipping episode due to truncation.")
trunc_count += 1
if trunc_count >= 5:
err_str = (
"Too many truncated episodes in a row identified - "
+ "please try modifying the randomness value."
)
raise RuntimeError(err_str)
continue
trunc_count = 0
datapoint_list += datapoints
collected_datapoints += len(datapoints)
num_episodes += 1
logging.info(
f"\tEpisode {num_episodes} |"
f" Collected: {len(datapoints)} |"
f" Total: {collected_datapoints}"
)
logging.info(f"Collected {collected_datapoints} datapoints total.")
if collected_datapoints > num_datapoints:
num_extra = collected_datapoints - num_datapoints
logging.debug(
f"{num_extra} datapoint(s) have been collected for cleaner MDP creation."
)
self._append_datapoints(datapoint_list)
self._analyze_dataset()
self.num_datapoints += collected_datapoints
def _collect_episode(
self, seed: int, randomness: float = 0.0
) -> Tuple[List[Type[BaseDatapoint]], bool]:
"""Collect datapoints from a single episode.
Args:
seed (int): Seed for the episode.
randomness (float, optional): How much randomness do we want when taking
actions. Defaults to 0.0.
Returns:
Tuple[List[Type[BaseDatapoint]], bool]: Datapoints, whether this episode was
truncated or not
"""
ep_datapoints = []
obs, _ = self.env.reset(seed=seed)
self.env.action_space.seed(seed)
step = 0
render = self.env.render()
rng = np.random.default_rng(seed)
term = False
trunc = False
while True:
take_rand_action = rng.random() <= randomness
if step == 0:
take_rand_action = False
if take_rand_action:
action = self.env.action_space.sample()
else:
datapoint, action = self.collector.collect_internal_data(observation=obs)
new_obs, reward, term, trunc, _ = self.env.step(action)
datapoint.add_base_data(obs, action, reward, term, trunc, step, render)
ep_datapoints.append(datapoint)
render = self.env.render()
step += 1
obs = new_obs
if term or trunc:
break
if term:
self._episode_lens += [step] * len(ep_datapoints)
return ep_datapoints, trunc
def _append_datapoints(self, datapoints: List[Type[BaseDatapoint]]):
"""Append the given datapoints to the dataset.
Args:
datapoints (List[Type[BaseDatapoint]]): Datapoints to add to the dataset
"""
start = time.time()
field_names = [i.name for i in dataclasses.fields(self.collector.datapoint_cls)]
data_dict = {i: [] for i in field_names}
for i in range(len(datapoints)):
datapoint = datapoints[i]
for field_name in field_names:
val = getattr(datapoint, field_name)
data_dict[field_name].append(val)
for field_name in field_names:
cur_value = getattr(self, field_name)
new_data = np.array(data_dict[field_name])
if cur_value.size == 0:
setattr(self, field_name, new_data)
else:
updated_value = np.concatenate([cur_value, new_data])
setattr(self, field_name, updated_value)
end = time.time()
logging.debug(f"Converting datapoints took {(end - start) / 60} minutes.")
def _init_analyze(self):
"""Initialize the additional analysis metrics."""
logging.info("Initializing analytics variables.")
self.total_rewards = np.array([], dtype=np.float64)
self.start_indices = np.array([], dtype=np.int8)
self.term_indices = np.array([], dtype=np.int8)
self.trunc_indices = np.array([], dtype=np.int8)
self.unique_state_indices = np.array([], dtype=np.int8)
self.state_mapping = np.array([], dtype=np.int8)
self.steps = self.steps.astype("float32")
def _analyze_dataset(self):
"""Add additional analysis metrics to the dataset that we can't collect."""
if not self.analyzed:
self._init_analyze()
logging.info("Extracting necessary additional data from dataset.")
self._set_total_rewards()
self._set_episode_prog_indices()
self._normalize_steps()
self._set_distinct_state_data()
logging.info("Done setting dataset analysis variables.")
self.analyzed = True
def _set_total_rewards(self):
"""Add information about the total reward received at each step."""
logging.info("\tSetting self.total_rewards.")
total_rewards = []
cur_total = 0
for i in range(self.num_datapoints, len(self.rewards)):
cur_total += self.rewards[i]
total_rewards.append(cur_total)
if self.terminateds[i] or self.truncateds[i]:
cur_total = 0
self.total_rewards = np.concatenate([self.total_rewards, np.array(total_rewards)])
def _set_episode_prog_indices(self):
"""Extract episode start and termination indices from the dataset."""
logging.info("\tSetting self.start_indices.")
logging.info("\tSetting self.term_indices.")
logging.info("\tSetting self.trunc_indices.")
trunc_steps = self.steps[self.num_datapoints : len(self.steps)]
trunc_terms = self.terminateds[self.num_datapoints : len(self.terminateds)]
trunc_truncs = self.truncateds[self.num_datapoints : len(self.truncateds)]
start_indices = np.where(trunc_steps == 0)[0] + self.num_datapoints
term_indices = np.where(trunc_terms == 1)[0] + self.num_datapoints
trunc_indices = np.where(trunc_truncs == 1)[0] + self.num_datapoints
self.start_indices = np.concatenate([self.start_indices, start_indices])
self.term_indices = np.concatenate([self.term_indices, term_indices])
self.trunc_indices = np.concatenate([self.trunc_indices, trunc_indices])
if len(start_indices) == 0:
logging.warning("No start indices identified.")
if len(term_indices) == 0:
logging.warning("No terminated indices identified.")
if len(trunc_indices) == 0:
logging.warning("No truncated indices identified.")
def _normalize_steps(self):
"""Normalize the steps between 0 and 1 depending on time in episode taken."""
logging.info("\tNormalizing self.steps.")
# Only get the data from the most recent fill
cur_fill_steps = deepcopy(self.steps[self.num_datapoints : len(self.steps)])
normalized_steps = []
for i in range(len(cur_fill_steps)):
step = cur_fill_steps[i]
normalized_steps.append(step / self._episode_lens[i])
self.steps[self.num_datapoints : len(self.steps)] = normalized_steps
def _set_distinct_state_data(self):
"""Extract the unique state indices and corresponding state mapping to identify
unique observations in the dataset. T-SNE has trouble with duplicate states so
mapping unique states together is beneficial.
"""
logging.info("\tSetting self.unique_state_indices.")
logging.info("\tSetting self.state_mapping.")
outputs = np.unique(
self.observations, return_index=True, return_inverse=True, axis=0
)
_, unique_state_indices, state_mapping = outputs
self.unique_state_indices = unique_state_indices
self.state_mapping = state_mapping
def get_dict(self) -> Dict[str, List[np.ndarray]]:
"""Get a dictionary representation of this dataset.
Returns:
Dict[str, List[np.ndarray]]: Dictionary representation of this dataset.
"""
out_dict = {}
for field in dataclasses.fields(self.collector.datapoint_cls):
out_dict[field.name] = np.array(getattr(self, field.name))
if self.analyzed:
out_dict["total_rewards"] = self.total_rewards
out_dict["start_indices"] = self.start_indices
out_dict["term_indices"] = self.term_indices
out_dict["trunc_indices"] = self.trunc_indices
out_dict["unique_state_indices"] = self.unique_state_indices
out_dict["state_mapping"] = self.state_mapping
return out_dict
def save(self, file_path: str) -> None:
"""
Save dictionary of datapoints to the given file_path.
Args:
- file_path str: Filepath to save XRL dataset to.
"""
if not file_path[-4:] == ".npz":
file_path += ".npz"
logging.info(f"Saving datapoints to {file_path}...")
os.makedirs(os.path.dirname(file_path), exist_ok=True)
start = time.time()
np.savez_compressed(file_path, **self.get_dict())
end = time.time()
file_size = round(os.path.getsize(file_path) >> 20, 2)
logging.debug(f"\tFile size: {file_size} MB")
logging.debug(f"\tSaved dataset in {(end - start) % 60} minutes.")
def load(self, load_path: str) -> None:
"""Load a XRLDataset from the given path.
Args:
load_path (str): Path to saved XRLDataset.
Raises:
ValueError: Missing a required dataset key.
ValueError: There is no data to load.
ValueError: Input keys do not have the same number of datapoints.
"""
dataset = np.load(load_path)
lens = set()
for key in [
"observations",
"actions",
"rewards",
"terminateds",
"truncateds",
"steps",
"renders",
]:
if key not in dataset:
raise ValueError(f"Invalid dataset - missing {key}.")
if len(dataset[key]) == 0:
raise ValueError(f"Key {key} has no associated data.")
lens.add(len(dataset[key]))
if len(lens) > 1:
raise ValueError("Input keys do not have the same number of datapoints.")
for key in dataset:
setattr(self, key, dataset[key])
self.num_datapoints = len(dataset["observations"])
try:
getattr(self, "total_rewards")
self.analyzed = True
except Exception:
self.analyzed = False
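# --- Editor's note: the following is a hypothetical usage sketch, not part of the arlin source quoted above.
# The collector and datapoint classes it imports are quoted further below in this listing. ---
import gymnasium as gym
from arlin.dataset import XRLDataset
from arlin.dataset.collectors import RandomDataCollector
from arlin.dataset.collectors.datapoints import BaseDatapoint

env = gym.make("LunarLander-v2", render_mode="rgb_array")
collector = RandomDataCollector(datapoint_cls=BaseDatapoint, environment=env)
dataset = XRLDataset(env, collector=collector)
dataset.fill(num_datapoints=1000, randomness=0.1)   # run episodes until enough datapoints are gathered
dataset.save("outputs/lunar_random.npz")            # compressed .npz of every datapoint field
dataset.load("outputs/lunar_random.npz")            # restores the fields and the analysis flag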
# Path: arlin/dataset/collectors/base_collectors.py
class RandomDataCollector(BaseDataCollector):
"""Data collection when the agent is taking random actions."""
def __init__(self, datapoint_cls: Type[BaseDatapoint], environment: gym.Env):
"""Initialize a RandomDataCollector object.
Args:
datapoint_cls (Type[BaseDatapoint]): Class of datapoint we are collecting.
environment (gym.Env): Environment the policy is interacting with.
"""
super().__init__(datapoint_cls=datapoint_cls)
self.env = environment
def collect_internal_data(
self, observation: np.ndarray
) -> Tuple[Type[BaseDatapoint], int]:
action = self.env.action_space.sample()
return self.datapoint_cls(), action
# Path: arlin/dataset/collectors/sb3_collectors.py
class SB3PPODataCollector(BaseDataCollector):
"""Data collector for a model trained with PPO in stable-baselines3."""
def __init__(self, datapoint_cls: Type[BaseDatapoint], policy: BasePolicy):
super().__init__(datapoint_cls=datapoint_cls)
self.policy = policy
def collect_internal_data(
self, observation: np.ndarray
) -> Tuple[type[BaseDatapoint], int]:
with th.no_grad():
obs = th.Tensor(np.expand_dims(observation, 0))
policy_dist = self.policy.get_distribution(obs)
action = policy_dist.get_actions(deterministic=True).item()
probs = policy_dist.distribution.probs
value = self.policy.predict_values(obs)
features = self.policy.extract_features(obs)
if self.policy.share_features_extractor:
latent_pi, latent_vf = self.policy.mlp_extractor(features)
pi_features = features
vf_features = features
else:
pi_features, vf_features = features
latent_pi = self.policy.mlp_extractor.forward_actor(pi_features)
latent_vf = self.policy.mlp_extractor.forward_critic(vf_features)
datapoint = self.datapoint_cls(
latent_actors=th.squeeze(latent_pi).numpy(),
latent_critics=th.squeeze(latent_vf).numpy(),
dist_probs=th.squeeze(probs).numpy(),
critic_values=th.squeeze(value).item(),
pi_features=th.squeeze(pi_features).numpy(),
vf_features=th.squeeze(vf_features).numpy(),
)
return datapoint, action
# Path: arlin/dataset/collectors/datapoints.py
class BaseDatapoint:
"""Base datapoint with traditional RL data that is common to all algorithms."""
observations: Optional[np.ndarray] = None
actions: Optional[int] = None
rewards: Optional[float] = None
terminateds: Optional[bool] = None
truncateds: Optional[bool] = None
steps: Optional[float] = None
renders: Optional[np.ndarray] = None
def __eq__(self, other: Any):
if not isinstance(other, BaseDatapoint):
return False
self_fields = [i.name for i in dataclasses.fields(self)]
other_fields = [i.name for i in dataclasses.fields(other)]
if not self_fields == other_fields:
return False
for field in self_fields:
if not np.array_equal(getattr(self, field), getattr(other, field)):
return False
return True
def add_base_data(
self,
obs: np.ndarray,
action: int,
reward: float,
terminated: bool,
truncated: bool,
step: float,
render: np.ndarray,
):
"""Add the base RL data to this Datapoint object.
Args:
obs (np.ndarray): Current observation
action (int): Action taken
reward (float): Reward received
terminated (bool): Did the episode end
truncated (bool): Did we run out of steps
step (float): Current step of this data
render (np.ndarray): Render of the environment state
"""
self.observations = obs
self.actions = action
self.rewards = reward
self.terminateds = terminated
self.truncateds = truncated
self.steps = step
self.renders = render
# Path: arlin/dataset/collectors/datapoints.py
class SB3PPODatapoint(BaseDatapoint):
"""Datapoint for a PPO algorithm trained in stable-baselines3."""
latent_actors: Optional[np.ndarray] = None
latent_critics: Optional[np.ndarray] = None
dist_probs: Optional[np.ndarray] = None
critic_values: Optional[float] = None
pi_features: Optional[np.ndarray] = None
vf_features: Optional[np.ndarray] = None
# Path: arlin/generation.py
def generate_clusters(
dataset: XRLDataset,
start_cluster_keys: List[str],
intermediate_cluster_keys: List[str],
term_cluster_keys: List[str],
num_clusters: int,
seed: Optional[int] = None,
) -> Tuple[np.ndarray, object, object, object]:
"""Generate clusters from the given XRLDataset.
NOTE: Order of the keys matters - ensure the data passed in during inference time
matches the order of the keys passed in during cluster generation.
Args:
dataset (XRLDataset): XRLDataset to cluster on.
start_cluster_keys (List[str]): Keys to cluster initial states on
intermediate_cluster_keys (List[str]): Keys to cluster intermediate states on
term_cluster_keys (List[str]): keys to cluster terminal states on
num_clusters (int): Number of intermediate clusters to find in intermediate
(not initial or terminal) states
seed (Optional[int], optional): Seed for clustering. Defaults to None.
Raises:
ValueError: No initial states found.
ValueError: No terminal states found.
ValueError: Not enough datapoints given (< num_clusters)
Returns:
Tuple(np.ndarray, object, object, object):
Cluster values for each datapoint, initial cluster estimator, intermediate cluster
estimator, terminal cluster estimator
"""
logging.info(f"Generating {num_clusters} clusters.")
start = time.time()
(cluster_on_start, cluster_on_mid, cluster_on_term, mid_mask) = _get_cluster_ons(
dataset, start_cluster_keys, intermediate_cluster_keys, term_cluster_keys
)
if len(cluster_on_start) == 0:
raise ValueError("No initial indices found! Cancelling clustering.")
else:
start_algo = MeanShift()
start_clusters = start_algo.fit(cluster_on_start)
start_clusters = start_clusters.labels_
if len(cluster_on_term) == 0:
raise ValueError("No terminal indices found! Cancelling clustering.")
else:
term_algo = MeanShift()
term_clusters = term_algo.fit(cluster_on_term)
term_clusters = term_clusters.labels_
if num_clusters > len(cluster_on_mid):
raise ValueError(
f"Not enough datapoints {len(cluster_on_mid)} to create \
{num_clusters} clusters."
)
mid_algo = KMeans(n_clusters=num_clusters, random_state=seed, n_init="auto")
mid_clusters = mid_algo.fit(cluster_on_mid)
mid_clusters = mid_clusters.labels_
n_start_clusters = len(set(start_clusters))
start_clusters = np.array([x + num_clusters for x in start_clusters], dtype=int)
term_clusters = np.array(
[x + n_start_clusters + num_clusters for x in term_clusters], dtype=int
)
clusters = np.empty([len(dataset.terminateds)], dtype=int)
clusters[mid_mask] = mid_clusters
clusters[dataset.start_indices] = start_clusters
clusters[dataset.term_indices] = term_clusters
end = time.time()
logging.info(f"\tSuccessfully generated clusters in {end - start} seconds.")
return clusters, start_algo, mid_algo, term_algo
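# --- Editor's note (illustrative, not part of the arlin source quoted above). ---
# The returned cluster ids are offset so the three estimators never collide. For example, with
# num_clusters=10 intermediate clusters and 3 MeanShift clusters over the initial states:
#     intermediate states -> ids 0..9
#     initial states      -> ids 10..12   (MeanShift label + num_clusters)
#     terminal states     -> ids 13...    (MeanShift label + num_clusters + n_start_clusters)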
# Path: arlin/generation.py
def generate_embeddings(
dataset: XRLDataset,
activation_key: str,
perplexity: int,
n_train_iter: int,
output_dim: int = 2,
seed: int = 12345,
) -> np.ndarray:
"""Generate TSNE embeddings from the given XRLDataset.
Args:
dataset (XRLDataset): XRLDataset generated from an RL policy.
activation_key (str): Data that we want to embed on.
perplexity (int): Perplexity value for TSNE
n_train_iter (int): Number of training iterations for TSNE
output_dim (int, optional): Output dimensions of the embeddings. Defaults to 2.
seed (int, optional): Seed for TSNE. Defaults to 12345.
Returns:
np.ndarray: TSNE embeddings
"""
logging.info(f"Generating embeddings from dataset.{activation_key}.")
start = time.time()
embedder = TSNE(
n_jobs=4,
n_components=output_dim,
perplexity=perplexity,
n_iter=n_train_iter,
verbose=1,
random_state=seed,
)
activations = getattr(dataset, activation_key)
unique_activations = activations[dataset.unique_state_indices]
embeddings = embedder.fit_transform(unique_activations)
embeddings = [embeddings[index] for index in dataset.state_mapping]
end = time.time()
logging.info(f"\tSuccessfully generated embeddings in {(end - start) % 60} minutes.")
return np.array(embeddings)
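# --- Editor's note: the following is a small illustrative sketch, not part of the arlin source quoted above. ---
# The de-duplication consumed above (unique_state_indices / state_mapping) is the standard
# np.unique return_index / return_inverse pair:
import numpy as np

obs = np.array([[0, 0], [1, 1], [0, 0]])
_, unique_idx, mapping = np.unique(obs, return_index=True, return_inverse=True, axis=0)
# unique_idx == [0, 1]    -> rows holding each distinct observation
# mapping    == [0, 1, 0] -> per-row index into the unique set, so embeddings computed on the
#                            unique rows can be broadcast back via [embeddings[i] for i in mapping]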
# Path: tests/conftest.py
import gymnasium as gym
import pytest
from stable_baselines3 import PPO
from arlin.dataset import XRLDataset
from arlin.dataset.collectors import RandomDataCollector, SB3PPODataCollector
from arlin.dataset.collectors.datapoints import BaseDatapoint, SB3PPODatapoint
from arlin.generation import generate_clusters, generate_embeddings
@pytest.fixture
def env():
# Create environment
env = gym.make("LunarLander-v2", render_mode="rgb_array")
return env
@pytest.fixture
def random_dataset(env):
# Create the datapoint collector for SB3 PPO Datapoints with the model's policy
collector = RandomDataCollector(datapoint_cls=BaseDatapoint, environment=env)
# Instantiate the XRL Dataset
dataset = XRLDataset(env, collector=collector)
dataset.fill(num_datapoints=50, randomness=0.25)
return dataset
@pytest.fixture
def random_embeddings(random_dataset):
embeddings = generate_embeddings(
dataset=random_dataset,
activation_key="observations",
perplexity=5,
n_train_iter=250,
output_dim=2,
seed=12345,
)
return embeddings
@pytest.fixture
def random_clusters(random_dataset):
clusters, start_algo, mid_algo, term_algo = generate_clusters(
random_dataset,
["observations", "rewards"],
["observations", "rewards"],
["rewards"],
10,
seed=1234,
)
return (clusters, start_algo, mid_algo, term_algo)
@pytest.fixture
def ppo_model(env):
model = PPO("MlpPolicy", env, verbose=1)
model.learn(total_timesteps=int(100))
return model
@pytest.fixture
def ppo_collector(ppo_model):
# Create the datapoint collector for SB3 PPO Datapoints with the model's policy
collector = SB3PPODataCollector(
datapoint_cls=SB3PPODatapoint, policy=ppo_model.policy
)
return collector
@pytest.fixture
def ppo_dataset(env, ppo_collector):
# Instantiate the XRL Dataset
dataset = XRLDataset(env, collector=ppo_collector)
dataset.fill(num_datapoints=50, randomness=0.25)
return dataset
@pytest.fixture
| def ppo_embeddings(ppo_dataset): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Giftify-Bot/Giftify-Bot
# Path: bot.py
class Giftify(GiftifyHelper, commands.AutoShardedBot):
user: discord.ClientUser
colour: int = 0xCB3045
__version_info__ = "1.1.4"
def __init__(
self,
*,
log_handler: LogHandler,
pool: asyncpg.Pool,
session: aiohttp.ClientSession,
amari_client: AmariClient,
) -> None:
self._log_handler = log_handler
self._pool = pool
self._session = session
self._amari_client = amari_client
intents = discord.Intents(messages=True, emojis=True, guilds=True)
allowed_mentions = discord.AllowedMentions(everyone=False, roles=False, users=True, replied_user=False)
member_cache_flags = discord.MemberCacheFlags.from_intents(intents=intents)
sentry_sdk.init(
dsn=os.environ["SENTRY_DSN"],
integrations=[
LoggingIntegration(
level=logging.INFO,
event_level=logging.ERROR,
)
],
traces_sample_rate=1.0,
)
super().__init__(
command_prefix=commands.when_mentioned,
tree_cls=CommandTree,
help_command=None,
description="A giveaway bot for hosting giveaways.",
intents=intents,
allowed_mentions=allowed_mentions,
chunk_guilds_at_startup=False,
max_messages=None,
activity=discord.CustomActivity(name="\N{LINK SYMBOL} https://giftifybot.vercel.app"),
member_cache_flags=member_cache_flags,
owner_ids=OWNER_IDS,
)
@property
def log_handler(self) -> LogHandler:
return self._log_handler
@property
def pool(self) -> asyncpg.Pool:
return self._pool
@property
def session(self) -> aiohttp.ClientSession:
return self._session
@property
def amari_client(self) -> AmariClient:
return self._amari_client
@property
def timer_cog(self) -> TimerManager:
return self.get_cog("TimerManager") # type: ignore
def run(self) -> None:
raise NotImplementedError("Please use `.start()` instead.")
async def on_ready(self) -> None:
self.log_handler.log.info("%s got a ready event at %s", self.user.name, datetime.datetime.now())
async def on_resume(self) -> None:
self.log_handler.log.info("%s got a resume event at %s", self.user.name, datetime.datetime.now())
async def on_command_error(self, ctx: commands.Context, error: commands.CommandError) -> None:
if isinstance(error, commands.CommandInvokeError):
origin_ = error.original
assert ctx.command is not None
if not isinstance(origin_, discord.HTTPException):
print(f"In {ctx.command.qualified_name}:", file=sys.stderr)
traceback.print_tb(origin_.__traceback__)
print(f"{origin_.__class__.__name__}: {origin_}", file=sys.stderr)
sentry_sdk.capture_exception(error)
async def start(self) -> None:
await super().start(token=os.environ["TOKEN"], reconnect=True)
async def setup_hook(self) -> None:
self.start_time: datetime.datetime = datetime.datetime.now(datetime.timezone.utc)
self.bot_app_info = await self.application_info()
self.owner_ids = OWNER_IDS
async def get_or_fetch_user(self, user_id: int) -> Optional[discord.User]:
"""Looks up a user in cache or fetches if not found.
Parameters
-----------
user_id: int
The user ID to search for.
Returns
---------
Optional[User]
The user or None if not found.
"""
user = self.get_user(user_id)
if user is not None:
return user
try:
user = await self.fetch_user(user_id)
except discord.HTTPException:
return None
else:
return user
async def get_or_fetch_member(self, guild: discord.Guild, member_id: int) -> Optional[discord.Member]:
"""Looks up a member in cache or fetches if not found.
Parameters
-----------
guild: Guild
The guild to look in.
member_id: int
The member ID to search for.
Returns
---------
Optional[Member]
The member or None if not found.
"""
member = guild.get_member(member_id)
if member is not None:
return member
shard: discord.ShardInfo = self.get_shard(guild.shard_id) # type: ignore # will never be None
if shard.is_ws_ratelimited():
try:
member = await guild.fetch_member(member_id)
except discord.HTTPException:
return None
else:
return member
members = await guild.query_members(limit=1, user_ids=[member_id], cache=True)
if not members:
return None
return members[0]
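# --- Editor's note: the following is a hypothetical usage sketch, not part of the Giftify-Bot source quoted above. ---
# Both helpers above implement a cache-first lookup with an HTTP fallback, so callers can do:
#
#     user = await bot.get_or_fetch_user(123456789)             # None if the user does not exist
#     member = await bot.get_or_fetch_member(guild, 123456789)  # None if not a member of the guild
#
# (the ID above is a placeholder used only for illustration)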
# Path: models/donation_settings.py
class DonationAction(Enum):
ADD = 0
REMOVE = 1
SYNC = 2
def __str__(self) -> str:
if self == DonationAction.ADD:
return "Added"
elif self == DonationAction.REMOVE:
return "Removed"
else:
return "Synced"
# Path: models/donation_settings.py
class GuildDonationConfig:
"""Represents the donation configuration settings for a guild.
Parameters
----------
bot: Giftify
The bot instance handling the configuration.
guild discord.Guild
The guild associated with the configuration.
category: str
The category or name of the donation configuration.
symbol: str
The symbol or identifier of the donation category.
roles: Dict[int, discord.Role]
A dictionary mapping of amount to `discord.Role`.
managers: List[discord.Role]
A list of `discord.Role` objects representing the roles with donation management permissions.
logging: Optional[discord.TextChannel]
An optional `discord.TextChannel` object used for logging donation events.
"""
__slots__: Tuple[str, ...] = (
"bot",
"guild",
"category",
"symbol",
"roles",
"managers",
"logging",
)
def __init__(
self,
bot: Giftify,
*,
guild: discord.Guild,
category: str,
symbol: str,
roles: Dict[int, discord.Role],
managers: List[discord.Role],
logging: Optional[discord.TextChannel] = None,
):
self.bot = bot
self.guild = guild
self.category = category
self.symbol = symbol
self.roles = roles
self.managers = managers
self.logging = logging
def __str__(self):
return self.category
def __repr__(self):
return f"<GuildDonationConfig guild={self.guild!r}> category={self.category}"
@classmethod
async def create(
cls, guild_id: int, category: str, bot: Giftify, *, symbol: Optional[str] = None
) -> "GuildDonationConfig":
record = await bot.pool.fetchrow(
"INSERT INTO donation_configs (guild, category, symbol) VALUES ($1, $2, $3) RETURNING *",
guild_id,
category,
symbol,
)
instance = await cls.from_record(bot, record=record)
assert instance is not None
return instance
@classmethod
async def from_record(
cls, bot: Giftify, *, record: asyncpg.Record
) -> Optional["GuildDonationConfig"]:
guild = bot.get_guild(record["guild"])
if not guild:
return None
category = record["category"]
symbol = record["symbol"]
roles = {}
managers = []
logging: Optional[discord.TextChannel] = (
guild.get_channel(record["logging"]) if record["logging"] else None
) # type: ignore
for amount, role_id in record["roles"].items():
if role := guild.get_role(role_id):
roles[int(amount)] = role
for role_id in record["managers"]:
if role := guild.get_role(role_id):
managers.append(role)
return cls(
bot,
guild=guild,
category=category,
symbol=symbol,
roles=roles,
managers=managers,
logging=logging,
)
async def update(
self,
key: str,
value: Union[
str, discord.TextChannel, Dict[int, discord.Role], List[discord.Role]
],
) -> None:
"""
Update a specific attribute of the GuildDonationConfig.
Parameters
----------
key: str
The attribute name to be updated. Should be one of "category", "symbol", "logging", "roles", or "managers".
value: Union[str, discord.TextChannel, Dict[int, discord.Role], List[discord.Role]]
The new value for the attribute.
Raises
------
ValueError
If an invalid key is provided.
If the value is not of the expected type for the specified key.
Returns
-------
None
"""
if key not in ["category", "symbol", "logging", "roles", "managers"]:
raise ValueError(
"Invalid key provided. Valid keys are 'category', 'symbol', 'logging', 'roles', and 'managers'."
)
if key in ["category", "symbol"]:
await self._update_config(key, str(value))
setattr(self, key, value)
elif key == "logging":
if not isinstance(value, discord.TextChannel):
raise ValueError("Value for 'logging' must be a discord.TextChannel.")
self.logging = value
await self._update_config(key, value.id)
elif key == "roles":
if not isinstance(value, dict):
raise ValueError("Value for 'roles' must be a dictionary.")
self.roles = value
role_values = {amount: role.id for amount, role in value.items()}
await self._update_config(key, role_values)
elif key == "managers":
if not isinstance(value, list):
raise ValueError("Value for 'managers' must be a list.")
self.managers = value
role_ids = [role.id for role in value]
await self._update_config(key, role_ids)
async def _update_config(
self, key: str, value: Union[str, int, List[int], Dict[int, int]]
) -> None:
await self.bot.pool.execute(
f"UPDATE donation_configs SET {key} = $1 WHERE guild = $2 AND category = $3",
value,
self.guild.id,
self.category,
)
async def delete(self):
await self.bot.pool.execute(
"DELETE FROM donation_configs WHERE guild = $1 AND category = $2",
self.guild.id,
self.category,
)
async def reset(self):
await self.bot.pool.execute(
"DELETE FROM donations WHERE guild = $1 AND category = $2",
self.guild.id,
self.category,
)
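# --- Editor's note: the following is a hypothetical usage sketch, not part of the Giftify-Bot source quoted above. ---
# GuildDonationConfig.update validates the key before persisting, so typical calls look like:
#
#     await config.update("symbol", "$")
#     await config.update("roles", {1000: bronze_role, 5000: gold_role})  # amount -> discord.Role
#     await config.update("managers", [manager_role])
#
# where bronze_role, gold_role and manager_role are placeholder discord.Role objects.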
# Path: utils/constants.py
DONATE_EMOJI = "<:GiftifyDonate:1122076957375471647>"
# Path: utils/constants.py
MINUS_EMOJI = "<:GiftifyMinus:1122076950421327872>"
# Path: utils/constants.py
MONEY_EMOJI = "<:GiftifyMoney:1122076961422975059>"
# Path: utils/constants.py
PLUS_EMOJI = "<:GiftifyPlus:1122076954556903494>"
# Path: utils/constants.py
SUCCESS_EMOJI = "<:GiftifySuccess:1100674526318166048>"
# Path: utils/exceptions.py
class DonationCategoryError(app_commands.CheckFailure):
def __init__(self, message: str):
self.message = message
# Path: utils/exceptions.py
class DonationError(Exception):
pass
# Path: utils/exceptions.py
class DonationPermissionsError(app_commands.CheckFailure):
def __init__(self, message: str):
self.message = message
# Path: utils/paginator.py
class BaseButtonPaginator(Generic[T], discord.ui.View, abc.ABC):
"""The base implementation of a button paginator. This class should be inherited
then the custom instance defined.
Parameters
----------
entries: List[Any]
The entries to paginate.
per_page: int
The amount of entries to show per page.
clamp_pages: bool
Whether to clamp the pages to the max and min page. This means that when the user
reaches the max page, it will go back to the first page. Likewise, when the user
reaches the first page, it will go back to the last page.
target: Optional[Union[discord.Interaction, commands.Context]]
The target interaction or context to use for the paginator. This is used to
ensure that the user invoking the paginator is the same user that is interacting
with the paginator.
If this is ``None`` then the interaction check will always return True.
"""
def __init__(
self,
*,
entries: List[T],
per_page: int = 6,
clamp_pages: bool = True,
target: Optional[TargetType] = None,
extras: Optional[Dict[Any, Any]] = None,
) -> None:
super().__init__(timeout=180)
self.entries: List[T] = entries
self.per_page: int = per_page
self.clamp_pages: bool = clamp_pages
self.target: Optional[TargetType] = target
self.extras = extras
self.author: Optional[Union[discord.User, discord.Member]] = target and (
target.user if isinstance(target, discord.Interaction) else target.author
)
self.bot: Optional[Giftify] = target and (
target.client if isinstance(target, discord.Interaction) else target.bot
)
self._current_page_index = 0
self.pages = [
entries[i : i + per_page] for i in range(0, len(entries), per_page)
]
@property
def max_page(self) -> int:
"""The max page count for this paginator."""
return len(self.pages)
@property
def min_page(self) -> int:
"""The min page count for this paginator."""
return 1
@property
def current_page(self) -> int:
"""The current page the user is on."""
return self._current_page_index + 1
@property
def total_pages(self) -> int:
"""Returns the total amount of pages."""
return len(self.pages)
@abc.abstractmethod
def format_page(self, entries: List[T], /) -> discord.Embed:
"""
Used to make the embed that the user sees. This can be a coroutine or a regular
function. This must be overwritten by the subclass.
Parameters
----------
entries: List[Any]
A list of entries for the current page.
Returns
-------
discord.Embed
The embed for this page.
"""
raise NotImplementedError("Subclass did not overwrite format_page coro.")
async def embed(self) -> discord.Embed:
"""
A helper function to get the embed for the current page.
Returns
-------
discord.Embed
The embed for the current page.
"""
return await discord.utils.maybe_coroutine(
self.format_page, self.pages[self._current_page_index]
)
async def interaction_check(self, interaction: Interaction, /) -> Optional[bool]:
"""
The base interaction check for the given view.
This will always return ``True`` if the target is ``None``, otherwise it will check
that the user invoking the paginator is the same user that is interacting with the
paginator.
Parameters
----------
interaction: discord.Interaction
The interaction to check.
Returns
-------
Optional[bool]
The result of the interaction check. If this returns ``None`` then the interaction
was responded to with an error message to the user.
"""
if self.target is None:
return True
assert self.author
# Ensure this is the correct invoker
if self.author.id != interaction.user.id:
return await interaction.response.send_message(
"Hey, this isn't yours!", ephemeral=True
)
# Ensure they invoke it in the correct channel.
if (
self.target.channel
and interaction.channel
and self.target.channel.id != interaction.channel.id
):
return await interaction.response.send_message(
"Hey, this isn't in the right channel!", ephemeral=True
)
return True
def _switch_page(self, count: int, /) -> None:
self._current_page_index += count
if self.clamp_pages:
if count < 0: # Going down
if self._current_page_index < 0:
self._current_page_index = self.max_page - 1
elif count > 0: # Going up
if self._current_page_index > self.max_page - 1: # - 1 for indexing
self._current_page_index = 0
return
@discord.ui.button(emoji=ARROW_BACK_EMOJI)
async def on_arrow_backward(
self, interaction: Interaction, button: discord.ui.Button[BaseButtonPaginator]
) -> discord.InteractionMessage:
"""
The button to represent going backwards a page.
Parameters
----------
interaction: discord.Interaction
The interaction created from the user invoking the button.
button: discord.ui.Button
The button that was pressed.
"""
await interaction.response.defer()
self._switch_page(-1)
embed = await self.embed()
return await interaction.edit_original_response(embed=embed)
@discord.ui.button(emoji=STOP_EMOJI)
async def on_stop(
self, interaction: Interaction, button: discord.ui.Button[BaseButtonPaginator]
) -> discord.InteractionMessage:
"""
The button to represent stopping the paginator. This will disable all children
to the view then edit the original message with the updated view.
Parameters
----------
interaction: discord.Interaction
The interaction created from the user invoking the button.
button: discord.ui.Button
The button that was pressed.
"""
await interaction.response.defer()
for child in self.children:
child.disabled = True # type: ignore
self.stop()
return await interaction.edit_original_response(view=self)
@discord.ui.button(emoji=ARROW_EMOJI)
async def on_arrow_forward(
self, interaction: Interaction, button: discord.ui.Button[BaseButtonPaginator]
) -> discord.InteractionMessage:
"""
The button to represent going forward a page.
Parameters
----------
interaction: discord.Interaction
The interaction created from the user invoking the button.
button: discord.ui.Button
The button that was pressed.
"""
await interaction.response.defer()
self._switch_page(1)
embed = await self.embed()
return await interaction.edit_original_response(embed=embed)
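# --- Editor's note: the following is a hypothetical subclass sketch, not part of the Giftify-Bot source quoted above. ---
# Concrete paginators only need to supply format_page; the buttons, page switching and
# interaction checks all come from BaseButtonPaginator.
from typing import List

import discord

from utils.paginator import BaseButtonPaginator

class NamePaginator(BaseButtonPaginator[str]):
    def format_page(self, entries: List[str], /) -> discord.Embed:
        # One embed per page, listing that page's entries.
        return discord.Embed(description="\n".join(entries))

# e.g. NamePaginator(entries=["a", "b", "c"], per_page=2) would yield two pages.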
# Path: utils/transformers.py
class AmountTransformer(app_commands.Transformer):
async def transform(self, interaction: Interaction, argument: str) -> int:
match = AMOUNT_REGEX.match(argument.lower())
if match:
value = float(match.group(1).replace(",", ""))
suffix = match.group(2)
multiplier_roles = AMOUNT_DICT.get(suffix, 1)
result = int(value * multiplier_roles)
if result > 100_000_000_000_000:
raise InvalidAmount("Invalid amount. The number is too big.")
return result
elif argument.isdigit():
result = int(argument)
if result > 100_000_000_000_000:
raise InvalidAmount("Invalid amount. The number is too big.")
return result
else:
try:
result = int(float(argument))
if result > 100_000_000_000_000:
raise InvalidAmount("Invalid amount. The number is too big.")
return result
except ValueError:
raise InvalidAmount(
"Invalid amount format. Please provide a valid numerical value."
)
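# --- Editor's note (illustrative, not part of the Giftify-Bot source quoted above). ---
# Assuming AMOUNT_REGEX captures "<number><suffix>" and AMOUNT_DICT maps suffixes such as
# "k" or "m" to multipliers, the transformer would resolve inputs roughly like:
#     "2.5k"              -> 2_500         (regex branch: 2.5 * 1_000)
#     "1000000"           -> 1_000_000     (plain-digit branch)
#     "200000000000000"   -> InvalidAmount, since it exceeds the 100_000_000_000_000 cap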
# Path: utils/transformers.py
class DonationCategoryTransformer(app_commands.Transformer):
async def transform(
self, interaction: Interaction, value: str
) -> GuildDonationConfig:
assert interaction.guild is not None
config = interaction.client.get_donation_config(interaction.guild, value)
if not config:
raise InvalidDonationCategoryError(
f"The donation category of name {value} does not exist!",
)
return config
async def autocomplete(
self,
interaction: Interaction,
current: str,
) -> List[app_commands.Choice[str]]:
assert interaction.guild is not None
return [
app_commands.Choice(name=category, value=category)
for category in interaction.client.get_guild_donation_categories(
interaction.guild
)
if current.lower() in category.lower()
]
# Path: utils/tree.py
class CommandTree(app_commands.CommandTree):
async def on_error(
self,
interaction: Interaction,
error: app_commands.AppCommandError,
) -> None:
# Path: cogs/donations/donations.py
import datetime
import asyncpg
import discord
from typing import List, Optional, Tuple
from discord import app_commands
from discord.app_commands import Transform
from discord.ext import commands
from bot import Giftify
from models.donation_settings import DonationAction, GuildDonationConfig
from utils.constants import (
DONATE_EMOJI,
MINUS_EMOJI,
MONEY_EMOJI,
PLUS_EMOJI,
SUCCESS_EMOJI,
)
from utils.exceptions import (
DonationCategoryError,
DonationError,
DonationPermissionsError,
)
from utils.paginator import BaseButtonPaginator
from utils.transformers import AmountTransformer, DonationCategoryTransformer
from utils.tree import Interaction
from __future__ import annotations
def is_manager():
async def predicate(interaction: Interaction) -> bool:
assert interaction.guild is not None
assert isinstance(interaction.user, discord.Member)
category = interaction.namespace.category
config = interaction.client.get_donation_config(interaction.guild, category)
if not config:
raise DonationCategoryError("That is not a valid donation category.")
if interaction.user.guild_permissions.manage_guild:
return True
for role in config.managers:
if role in interaction.user.roles:
return True
else:
raise DonationPermissionsError(
"You do not have permissions to use this command."
)
return app_commands.check(predicate)
class DonationsLeaderboardPaginator(BaseButtonPaginator[asyncpg.Record]):
async def format_page(self, donations: List[asyncpg.Record], /) -> discord.Embed:
assert self.bot is not None
extras = self.extras or {}
description = "The top donors of this server are:\n\n"
for i, record in enumerate(donations):
description += f"`{i + 1}.` <@!{record['member']}> - **{extras.get('symbol')} {record['amount']:,}**\n"
embed = discord.Embed(
title=f"{MONEY_EMOJI} Top {extras.get('category')} Donors",
description=description,
color=self.bot.colour,
)
embed.set_thumbnail(url=self.bot.user.display_avatar)
embed.set_footer(text=f"Page {self.current_page}/{self.total_pages}")
return embed
class DonationCommands(commands.GroupCog):
"""Main cog for updating user donations."""
bot: Giftify
async def update_roles(
self, member: discord.Member, amount: int, config: GuildDonationConfig
) -> Tuple[List[str], List[str]]:
to_add: List[discord.Role] = []
to_remove: List[discord.Role] = []
for role_amount, role in config.roles.items():
if amount >= role_amount:
if role not in member.roles:
to_add.append(role)
else:
if role in member.roles:
to_remove.append(role)
try:
await member.add_roles(*to_add)
except discord.HTTPException:
pass
try:
await member.remove_roles(*to_remove)
except discord.HTTPException:
pass
return [role.mention for role in to_add], [role.mention for role in to_remove]
async def update_donation(
self,
*,
member: discord.Member,
amount: int,
action: DonationAction,
config: GuildDonationConfig,
) -> Tuple[int, List[str], List[str]]:
async with self.bot.pool.acquire() as connection:
async with connection.transaction():
query = """SELECT amount FROM donations
WHERE member = $1 AND guild = $2 AND category = $3"""
existing_amount = await connection.fetchval(
query, member.id, member.guild.id, config.category
)
if action == DonationAction.ADD:
query = """INSERT INTO donations (member, guild, category, amount)
VALUES ($1, $2, $3, $4)
ON CONFLICT (member, guild, category)
DO UPDATE SET amount = donations.amount + $4
RETURNING amount"""
updated_amount = await connection.fetchval(
query, member.id, member.guild.id, config.category, amount
)
else:
if existing_amount is None or amount > existing_amount:
raise DonationError(
"Cannot remove more than the existing amount."
)
else:
query = """UPDATE donations
SET amount = amount - $1
| WHERE member = $2 AND guild = $3 AND category = $4 |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Zjy0401/CoCoFormer
# Path: utilities/argument_funcs.py
def parse_generate_args():
parser = argparse.ArgumentParser()
parser.add_argument("-midi_root", type=str, default="./dataset/dataset/JSF", help="Midi file to prime the generator with")
parser.add_argument("-output_dir", type=str, default="./generate", help="Folder to write unconditional generated midi to")
parser.add_argument("-conditional_output_dir", type=str, default="./generate", help="conditional output dir")
parser.add_argument("-primer_file", type=str, default=None, help="File path or integer index to the evaluation dataset. Default is to select a random index.")
parser.add_argument("--gpu", default=[0], nargs='+', type=int, help="For Multi-GPUs generate")
parser.add_argument("--gpu", default=[0], nargs='+', type=int, help="For multi-GPU generation")
parser.add_argument("-word2event", type=str, default='./dataset/word2event.pkl', help='word table location: *.pkl')
parser.add_argument("-target_seq_length", type=int, default=2048, help="Target length you'd like the midi to be")
parser.add_argument("-num_prime", type=int, default=256, help="Amount of messages to prime the generator with")
parser.add_argument("-model_weights", type=str, default="./baseline_loss3_CBSATBoutput_0.4_0.2_1/weights/epoch_0040.pickle",
help="Pickled model weights file saved with torch.save and model.state_dict()")
parser.add_argument("-beam", type=int, default=0, help="Beam search k. 0 for random probability sample and 1 for greedy")
parser.add_argument("--rpr", action="store_true", help="Use a modified Transformer for Relative Position Representations")
parser.add_argument("-max_sequence", type=int, default=2048, help="Maximum midi sequence to consider")
parser.add_argument("-n_layers", type=int, default=6, help="Number of decoder layers to use")
parser.add_argument("-num_heads", type=int, default=8, help="Number of heads to use for multi-head attention")
parser.add_argument("-d_model", type=int, default=512, help="Dimension of the model (output dim of embedding layers, etc.)")
parser.add_argument("-dim_feedforward", type=int, default=1024, help="Dimension of the feedforward layer")
return parser.parse_args()
# Path: utilities/argument_funcs.py
def print_generate_args(args):
print(SEPERATOR)
print("midi_root:", args.midi_root)
print("output_dir:", args.output_dir)
print("primer_file:", args.primer_file)
print("force_cpu:", args.force_cpu)
print("")
print("target_seq_length:", args.target_seq_length)
print("num_prime:", args.num_prime)
print("model_weights:", args.model_weights)
print("beam:", args.beam)
print("")
print("rpr:", args.rpr)
print("max_sequence:", args.max_sequence)
print("n_layers:", args.n_layers)
print("num_heads:", args.num_heads)
print("d_model:", args.d_model)
print("")
print("dim_feedforward:", args.dim_feedforward)
print(SEPERATOR)
print("")
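# --- Editor's note: the following is a hypothetical usage sketch, not part of the CoCoFormer source quoted above. ---
# The two helpers above are normally paired at the top of a generation script:
#
#     args = parse_generate_args()
#     print_generate_args(args)
#     # ...then args.n_layers, args.num_heads, args.d_model, etc. configure the model.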
# Path: model/CoCoFormer.py
class CoCoformer(nn.Module):
def __init__(self, word2event, event2word, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,
dropout=0.1, max_sequence=2048, c_max_seq=256, b_max_seq=1024, rpr=False):
super(CoCoformer, self).__init__()
self.dummy = DummyDecoder()
self.nlayers = n_layers
self.nhead = num_heads
self.d_model = d_model
self.d_ff = dim_feedforward
self.dropout = dropout
self.max_seq = max_sequence
self.c_max_seq = c_max_seq
self.b_max_seq = b_max_seq
self.rpr = rpr
# word2event and event2word:
self.word2event = word2event
self.event2word = event2word
# past layer of chord
self.cpast_layer_dmodel = d_model
self.cpast_layer_nhead = 8
self.cpast_dim_forward = 256
self.cpast_layer_max_seq = 256
self.cpast_layer_nlayers = 1
# past layer of beats
self.bpast_layer_dmodel = d_model
self.bpast_layer_nhead = 8
self.bpast_dim_forward = 256
self.bpast_layer_max_seq = 1024
self.bpast_layer_nlayers = 1
# Input embedding
self.n_embedding = nn.Embedding(VOCAB_SIZE, self.d_model)
self.c_embedding = nn.Embedding(VOCAB_SIZE, self.cpast_layer_dmodel)
self.b_embedding = nn.Embedding(VOCAB_SIZE, self.bpast_layer_dmodel)
# Positional encoding
self.n_positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq)
self.c_positional_encoding = PositionalEncoding(self.cpast_layer_dmodel, self.dropout, self.cpast_layer_max_seq)
self.b_positional_encoding = PositionalEncoding(self.bpast_layer_dmodel, self.dropout, self.bpast_layer_max_seq)
# Base transformer
if not self.rpr:
# To make a decoder-only transformer we need to use masked encoder layers
# Dummy decoder to essentially just return the encoder output
encoder_norm = LayerNorm(self.d_model)
encoder_past_layer = TransformerEncoderPastLayer(self.cpast_layer_dmodel, self.cpast_layer_nhead,
self.cpast_dim_forward, self.bpast_layer_dmodel,
self.bpast_layer_nhead, self.bpast_dim_forward,
self.d_model, self.nhead,
self.d_ff, self.dropout)
encoder_layer = TransformerEncoderLayer(self.d_model, self.nhead, self.d_ff, self.dropout)
encoder = TransformerEncoder(encoder_layer, self.nlayers, encoder_past_layer, self.max_seq, self.c_max_seq,
self.b_max_seq, encoder_norm)
self.transformer = nn.Transformer(
d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,
num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,
dim_feedforward=self.d_ff, custom_encoder=encoder, custom_decoder=self.dummy
)
# RPR Transformer
elif self.rpr:
encoder_norm = LayerNorm(self.d_model)
encoder_layer = TransformerEncoderLayerRPR(self.d_model, self.nhead, self.d_ff, self.dropout,
er_len=self.max_seq)
encoder_past_layer = TransformerEncoderLayerRPR_(self.cpast_layer_dmodel, self.cpast_layer_nhead,
self.cpast_dim_forward, self.bpast_layer_dmodel,
self.bpast_layer_nhead, self.bpast_dim_forward,
self.d_model, self.nhead,
self.d_ff, self.dropout, er_len=self.max_seq)
encoder = TransformerEncoderRPR(encoder_layer, self.nlayers, encoder_past_layer, self.max_seq,
self.c_max_seq, self.b_max_seq, encoder_norm)
self.transformer = nn.Transformer(
d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,
num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,
dim_feedforward=self.d_ff, custom_decoder=self.dummy, custom_encoder=encoder
)
# Final output is a softmaxed linear layer
# TODO: verify the size of linear
self.Norm1 = nn.LayerNorm(1024)
self.ReLU = nn.ReLU()
self.Norm2 = nn.LayerNorm(181)
self.Dropout = nn.Dropout(dropout)
self.transLinear = nn.Linear(256, 256)
self.Wout1 = nn.Linear(self.d_model, 1024)
self.Wout2 = nn.Linear(1024, 1024)
self.Wout3 = nn.Linear(1024, VOCAB_SIZE)
self.softmax = nn.Softmax(dim=-1)
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
xavier_uniform_(p)
# forward
def forward(self, x1, x2, x3, mask=True):
args = parse_train_args()
# for pure-Transformer:
# Transformer module:
if mask is True:
if args.gpu[0] != -1:
mask = self.transformer.generate_square_subsequent_mask(x1.shape[1]).cuda(device=args.gpu[0])
else:
mask = self.transformer.generate_square_subsequent_mask(x1.shape[1]).cpu()
else:
mask = None
# Input shape is (max_seq, batch_size, d_model)
x_n = self.n_embedding(x1)
x_n = x_n.permute(1, 0, 2)
x_n = self.n_positional_encoding(x_n)
x_c = self.c_embedding(x2)
x_c = x_c.permute(1, 0, 2)
x_c = self.c_positional_encoding(x_c)
x_b = self.b_embedding(x3)
x_b = x_b.permute(1, 0, 2)
x_b = self.b_positional_encoding(x_b)
# Since there are no true decoder layers, the tgt is unused
# Pytorch wants src and tgt to have some equal dims however
x_out = self.transformer(src=torch.cat((x_n, x_c, x_b), dim=0), tgt=x_n,
src_mask=mask)
# x_out = self.transformer(src=x_transformer, tgt=x_transformer, src_mask=mask)
# Back to (batch_size, max_seq, d_model)
x_out = x_out.permute(1, 0, 2)
# concat
# x_concat = torch.cat([x_out, x_out2], dim=1)
y = self.Dropout(self.Norm1(self.ReLU(self.Wout1(x_out))))
y = self.Dropout(self.Norm1(self.ReLU(self.Wout2(y))))
y = self.Wout3(y)
# y = self.Wout2(y)
# y = self.softmax(y)
del mask
# They are trained to predict the next note in sequence (we don't need the last one)
return y
# unconditional generate
def generate(self, primer=None, target_seq_length=1024, beam=0, beam_chance=1.0):
assert (not self.training), "Cannot generate while in training mode"
print("Generating sequence of max length:", target_seq_length)
gen_seq = torch.full((1, target_seq_length), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())
num_primer = len(primer)
gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())
# print("primer:",primer)
# print(gen_seq)
cur_i = num_primer
while cur_i < target_seq_length:
# gen_seq_batch = gen_seq.clone()
y = self.softmax(self.forward(gen_seq[..., :cur_i]))[..., :len(self.word2event)]
token_probs = y[:, cur_i - 1, :]
if beam == 0:
beam_ran = 2.0
else:
beam_ran = random.uniform(0, 1)
if beam_ran <= beam_chance:
token_probs = token_probs.flatten()
top_res, top_i = torch.topk(token_probs, beam)
beam_rows = top_i // VOCAB_SIZE
beam_cols = top_i % VOCAB_SIZE
gen_seq = gen_seq[beam_rows, :]
gen_seq[..., cur_i] = beam_cols
else:
distrib = torch.distributions.categorical.Categorical(probs=token_probs)
next_token = distrib.sample()
# print("next token:",next_token)
gen_seq[:, cur_i] = next_token
# Let the transformer decide to end if it wants to
# if next_token == TOKEN_END:
# print("Model called end of sequence at:", cur_i, "/", target_seq_length)
# break
cur_i += 1
if cur_i % 50 == 0:
print(cur_i, "/", target_seq_length)
return gen_seq[:, :cur_i]
# conditional generate
def conditional_generate(self, beats, chord, seq, c, bs, ba, bt, bb, target_seq_length=1024, beam=0, beam_chance=1.0):
assert (not self.training), "Cannot generate while in training mode"
print("Generating sequence of max length:", target_seq_length)
chord = torch.tensor(chord, device=get_device()).unsqueeze(0)
beats = torch.tensor(beats, device=get_device()).unsqueeze(0)
gen_seq = torch.full((1, target_seq_length), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())
primer = torch.tensor([c[0], bs[0], seq[0], ba[0]])
primer_num = 1 # decide key to add
num_primer = len(primer)
gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())
# print("primer:",primer)
# print(gen_seq)
cur_i = num_primer
# first input: C B N B
cur_i_n = 1
cur_i_b = 2
cur_i_c = 1
check_error = 0
pbar = tqdm(total=len(seq)*9)
while cur_i < target_seq_length:
a = gen_seq[..., :cur_i].cpu().numpy()
# gen_seq_batch = gen_seq.clone()
# print("input:", gen_seq[..., :cur_i], chord[..., :cur_i_c], beats[..., :cur_i_b])
y = self.softmax(self.forward(gen_seq[..., :cur_i], chord[..., :cur_i_c],
beats[..., :cur_i_b]))[..., :len(self.word2event)]
token_probs = y[:, cur_i - 1, :]
# check for y
distrib = torch.distributions.categorical.Categorical(probs=token_probs)
next_token = distrib.sample()
if check_error > 256:
print("error! regenerate!")
return False
# enforce the expected token type for this position in the 9-token bar layout
if cur_i % 9 == 1: # token is chord, next token must be beats
if not 178 < next_token < 191: # if it is not beat
check_error += 1
continue
if cur_i % 9 in [2, 4, 6, 8]: # this token must be beat, next token must be note
if not next_token < 129: # if it is not note
check_error += 1
continue
else: # this token must be note, next token must be chord or beat
if not 128 < next_token < 191:  # if it is not a chord or beat
check_error += 1
continue
if beam == 0:
beam_ran = 2.0
else:
beam_ran = random.uniform(0, 1)
if beam_ran <= beam_chance:
token_probs = token_probs.flatten()
top_res, top_i = torch.topk(token_probs, beam)
beam_rows = top_i // VOCAB_SIZE
beam_cols = top_i % VOCAB_SIZE
gen_seq = gen_seq[beam_rows, :]
gen_seq[..., cur_i] = beam_cols
else:
# print("next token:",next_token)
gen_seq[:, cur_i] = next_token
cur_i += 1
pbar.update(1)
cur_i_n += 1
if cur_i % 9 == 0 and primer_num < len(seq):
# add C B_S N_S B_A
gen_seq[:, cur_i] = chord.squeeze()[primer_num]
gen_seq[:, cur_i+1] = torch.tensor(bs[primer_num], device=get_device())
gen_seq[:, cur_i+2] = torch.tensor(seq[primer_num], device=get_device())
gen_seq[:, cur_i+3] = torch.tensor(ba[primer_num], device=get_device())
primer_num += 1
cur_i += 4
pbar.update(4)
cur_i_n += 1
cur_i_b += 2
cur_i_c += 1
# a = gen_seq[..., :cur_i].cpu().numpy()
if cur_i % 9 != 0 and cur_i % 9 != 4 and primer_num < len(seq) + 1:
# add B
gen_seq[:, cur_i] = beats.squeeze()[cur_i_b]
cur_i_b += 1
cur_i_n += 1
cur_i += 1
pbar.update(1)
# a = gen_seq[..., :cur_i].cpu().numpy()
if primer_num == len(seq) and cur_i == len(seq) * 9:
print("Model called end of sequence at:", cur_i, "/", target_seq_length)
break
# print(cur_i, "/", target_seq_length)
print("all errors:%d" % check_error)
return gen_seq[:, :cur_i]
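The range checks in conditional_generate encode a fixed vocabulary layout: ids below 129 are treated as notes, ids with 178 < id < 191 as beats, and the remaining ids inside the chord-or-beat window (128 < id < 191) as chords. A minimal sketch of that classification, written for illustration only and assuming those boundaries hold for the whole vocabulary:

# Illustrative helper (not part of the repository): maps a token id to the
# category implied by the comparisons used in conditional_generate().
def token_kind(token_id: int) -> str:
    if token_id < 129:                 # same test as "if not next_token < 129"
        return "note"
    if 178 < token_id < 191:           # same test as "if not 178 < next_token < 191"
        return "beat"
    if 128 < token_id < 179:           # remainder of the chord-or-beat window
        return "chord"
    return "other"                     # pad/end or anything outside the known ranges

if __name__ == "__main__":
    for t in (64, 150, 185, 400):
        print(t, token_kind(t))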
# Path: dataset/jsf.py
def create_jsf_datasets(dataset_root, max_seq, random_seq=True):
train_root = os.path.join(dataset_root, "train")
# val_root = os.path.join(dataset_root, "val")
test_root = os.path.join(dataset_root, "test")
train_dataset = MultiJSFDataset(train_root, max_seq, random_seq)
# val_dataset = JSFDataset(val_root, max_seq, random_seq)
test_dataset = MultiJSFDataset(test_root, max_seq, random_seq)
return train_dataset, test_dataset
# Path: dataset/jsf.py
def compute_jsf_accuracy(out, tgt):
softmax = nn.Softmax(dim=-1)
out = torch.argmax(softmax(out), dim=-1)
# test for bug:
# out = np.array(out.cpu())
# tgt = np.array(tgt.cpu())
# only calculate note:
# out = out[:, :2048].flatten()
# tgt = tgt[:, :2048].flatten()
out = out.flatten()
tgt = tgt.flatten()
mask = (tgt != TOKEN_PAD)
out = out[mask]
tgt = tgt[mask]
# Empty
if (len(tgt) == 0):
return 1.0
num_right = (out == tgt)
num_right = torch.sum(num_right).type(TORCH_FLOAT)
acc = num_right / len(tgt)
return acc
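compute_jsf_accuracy only scores positions whose target is not TOKEN_PAD, so padding never inflates or deflates the result. The same masked-accuracy idea as a self-contained sketch on toy tensors (the pad id used here is a placeholder, not the repository constant):

import torch

PAD = 388  # placeholder pad id for this example only

def masked_accuracy(pred: torch.Tensor, tgt: torch.Tensor) -> float:
    pred, tgt = pred.flatten(), tgt.flatten()
    keep = tgt != PAD                          # ignore padded positions
    if keep.sum() == 0:
        return 1.0                             # mirrors the empty-target branch above
    return (pred[keep] == tgt[keep]).float().mean().item()

pred = torch.tensor([5, 7, 9, 0])
tgt = torch.tensor([5, 7, 1, PAD])             # last position is padding
print(masked_accuracy(pred, tgt))              # 2 correct of 3 scored -> ~0.6667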
# Path: dataset/jsf.py
def process_midi(raw_mid, max_seq, random_seq):
x = torch.full((max_seq,), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=cpu_device())
tgt = torch.full((max_seq,), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=cpu_device())
raw_len = len(raw_mid)
full_seq = max_seq + 1 # Performing seq2seq
if raw_len == 0:
return x, tgt
if raw_len < full_seq:
x[:raw_len] = raw_mid
tgt[:raw_len - 1] = raw_mid[1:]
tgt[raw_len - 1] = TOKEN_END
else:
# Randomly selecting a range
if random_seq:
end_range = raw_len - full_seq
start = random.randint(SEQUENCE_START, end_range)
# Always taking from the start to as far as we can
else:
start = SEQUENCE_START
end = start + full_seq
data = raw_mid[start:end]
x = data[:max_seq]
tgt = data[1:full_seq]
# print("x:",x)
# print("tgt:",tgt)
return x, tgt
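process_midi builds the usual shift-by-one language-modelling pair: tgt is x advanced by one step, with TOKEN_END written after a short sequence and TOKEN_PAD filling the rest. A small worked example with plain lists, using placeholder pad/end ids for illustration:

PAD, END = 388, 389          # placeholder ids for this example
MAX_SEQ = 6

raw = [10, 11, 12, 13]        # shorter than MAX_SEQ + 1, so no random crop happens
x = raw + [PAD] * (MAX_SEQ - len(raw))
tgt = raw[1:] + [END] + [PAD] * (MAX_SEQ - len(raw))

print(x)    # [10, 11, 12, 13, 388, 388]
print(tgt)  # [11, 12, 13, 389, 388, 388]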
# Path: utilities/device.py
def get_device():
if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):
return TORCH_CPU_DEVICE
else:
return TORCH_CUDA_DEVICE
# Path: utilities/device.py
def use_cuda(cuda_bool):
global USE_CUDA
USE_CUDA = cuda_bool
# Path: utilities/argument_funcs.py
def parse_generate_args():
parser = argparse.ArgumentParser()
parser.add_argument("-midi_root", type=str, default="./dataset/dataset/JSF", help="Midi file to prime the generator with")
parser.add_argument("-output_dir", type=str, default="./generate", help="Folder to write unconditional generated midi to")
parser.add_argument("-conditional_output_dir", type=str, default="./generate", help="conditional output dir")
parser.add_argument("-primer_file", type=str, default=None, help="File path or integer index to the evaluation dataset. Default is to select a random index.")
parser.add_argument("--gpu", default=[0], nargs='+', type=int, help="For Multi-GPUs generate")
parser.add_argument("--force_cpu", action="store_true", help="Forces model to run on a cpu even when gpu is available")
parser.add_argument("-word2event", type=str, default='./dataset/word2event.pkl', help='word table location: *.pkl')
parser.add_argument("-target_seq_length", type=int, default=2048, help="Target length you'd like the midi to be")
parser.add_argument("-num_prime", type=int, default=256, help="Amount of messages to prime the generator with")
parser.add_argument("-model_weights", type=str, default="./baseline_loss3_CBSATBoutput_0.4_0.2_1/weights/epoch_0040.pickle",
help="Pickled model weights file saved with torch.save and model.state_dict()")
parser.add_argument("-beam", type=int, default=0, help="Beam search k. 0 for random probability sample and 1 for greedy")
parser.add_argument("--rpr", action="store_true", help="Use a modified Transformer for Relative Position Representations")
parser.add_argument("-max_sequence", type=int, default=2048, help="Maximum midi sequence to consider")
parser.add_argument("-n_layers", type=int, default=6, help="Number of decoder layers to use")
parser.add_argument("-num_heads", type=int, default=8, help="Number of heads to use for multi-head attention")
parser.add_argument("-d_model", type=int, default=512, help="Dimension of the model (output dim of embedding layers, etc.)")
parser.add_argument("-dim_feedforward", type=int, default=1024, help="Dimension of the feedforward layer")
return parser.parse_args()
# Path: conditional_generate.py
import torch
import torch.nn as nn
import os
import random
import math
import mido
import music21
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import pickle
from utilities.argument_funcs import parse_generate_args, print_generate_args
from model.CoCoFormer import CoCoformer
from dataset.jsf import create_jsf_datasets, compute_jsf_accuracy, process_midi
from torch.utils.data import DataLoader
from torch.optim import Adam
from utilities.constants import *
from utilities.device import get_device, use_cuda
from utilities.argument_funcs import parse_generate_args
##### read word2event event2word
args = parse_generate_args()
f = open(args.word2event, 'rb')
word2event = pickle.load(f)
# reverse the vector event2word
event2word = {}
| for key, val in word2event.items(): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: emadeldeen24/ECGTransForm
# Path: models.py
class ecgTransForm(nn.Module):
def __init__(self, configs, hparams):
super(ecgTransForm, self).__init__()
filter_sizes = [5, 9, 11]
self.conv1 = nn.Conv1d(configs.input_channels, configs.mid_channels, kernel_size=filter_sizes[0],
stride=configs.stride, bias=False, padding=(filter_sizes[0] // 2))
self.conv2 = nn.Conv1d(configs.input_channels, configs.mid_channels, kernel_size=filter_sizes[1],
stride=configs.stride, bias=False, padding=(filter_sizes[1] // 2))
self.conv3 = nn.Conv1d(configs.input_channels, configs.mid_channels, kernel_size=filter_sizes[2],
stride=configs.stride, bias=False, padding=(filter_sizes[2] // 2))
self.bn = nn.BatchNorm1d(configs.mid_channels)
self.relu = nn.ReLU()
self.mp = nn.MaxPool1d(kernel_size=2, stride=2, padding=1)
self.do = nn.Dropout(configs.dropout)
self.conv_block2 = nn.Sequential(
nn.Conv1d(configs.mid_channels, configs.mid_channels * 2, kernel_size=8, stride=1, bias=False, padding=4),
nn.BatchNorm1d(configs.mid_channels * 2),
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, stride=2, padding=1)
)
self.conv_block3 = nn.Sequential(
nn.Conv1d(configs.mid_channels * 2, configs.final_out_channels, kernel_size=8, stride=1, bias=False,
padding=4),
nn.BatchNorm1d(configs.final_out_channels),
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, stride=2, padding=1),
)
self.inplanes = 128
self.crm = self._make_layer(SEBasicBlock, 128, 3)
self.encoder_layer = nn.TransformerEncoderLayer(d_model=configs.trans_dim, nhead=configs.num_heads, batch_first=True)
self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=3)
self.aap = nn.AdaptiveAvgPool1d(1)
self.clf = nn.Linear(hparams["feature_dim"], configs.num_classes)
def _make_layer(self, block, planes, blocks, stride=1): # makes residual SE block
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv1d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm1d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x_in):
# Multi-scale Convolutions
x1 = self.conv1(x_in)
x2 = self.conv2(x_in)
x3 = self.conv3(x_in)
x_concat = torch.mean(torch.stack([x1, x2, x3],2), 2)
x_concat = self.do(self.mp(self.relu(self.bn(x_concat))))
x = self.conv_block2(x_concat)
x = self.conv_block3(x)
# Channel Recalibration Module
x = self.crm(x)
# Bi-directional Transformer
x1 = self.transformer_encoder(x)
x2 = self.transformer_encoder(torch.flip(x,[2]))
x = x1+x2
x = self.aap(x)
x_flat = x.reshape(x.shape[0], -1)
x_out = self.clf(x_flat)
return x_out
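Two details of this forward pass are easy to miss: the three kernel sizes are fused by stacking and averaging (not concatenating), and the "bi-directional" transformer is simply the same encoder run on the sequence and on its reverse, with the two outputs summed. A minimal sketch of both operations on dummy tensors; the shapes and layer sizes are illustrative only, and the flip is shown on the sequence axis of this toy layout (the repository flips its own time axis):

import torch
import torch.nn as nn

x1 = torch.randn(8, 32, 100)               # three branch outputs with identical shapes
x2 = torch.randn(8, 32, 100)
x3 = torch.randn(8, 32, 100)
fused = torch.mean(torch.stack([x1, x2, x3], dim=2), dim=2)   # still (8, 32, 100)

enc_layer = nn.TransformerEncoderLayer(d_model=100, nhead=4, batch_first=True)
encoder = nn.TransformerEncoder(enc_layer, num_layers=1)

fwd = encoder(fused)                        # original order
bwd = encoder(torch.flip(fused, dims=[1]))  # reversed order
out = fwd + bwd
print(out.shape)                            # torch.Size([8, 32, 100])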
# Path: dataloader.py
def data_generator(data_path, data_type, hparams):
# original
train_dataset = torch.load(os.path.join(data_path, data_type, f"train.pt"))
val_dataset = torch.load(os.path.join(data_path, data_type, f"val.pt"))
test_dataset = torch.load(os.path.join(data_path, data_type, f"test.pt"))
# Loading datasets
train_dataset = Load_Dataset(train_dataset)
val_dataset = Load_Dataset(val_dataset)
test_dataset = Load_Dataset(test_dataset)
cw = train_dataset.y_data.numpy().tolist()
cw_dict = {}
for i in range(len(np.unique(train_dataset.y_data.numpy()))):
cw_dict[i] = cw.count(i)
# print(cw_dict)
# Dataloaders
batch_size = hparams["batch_size"]
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size,
shuffle=True, drop_last=True, num_workers=0)
val_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=batch_size,
shuffle=False, drop_last=True, num_workers=0)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size,
shuffle=False, drop_last=False, num_workers=0)
return train_loader, val_loader, test_loader, cw_dict
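cw_dict ends up mapping each class label to its number of training samples, which the trainer later turns into loss weights. An equivalent, self-contained way to build the same mapping from a label vector, shown only for illustration:

import collections
import numpy as np

labels = np.array([0, 0, 1, 2, 2, 2, 1, 0])        # stand-in for train_dataset.y_data
cw_dict = {int(k): int(v) for k, v in sorted(collections.Counter(labels).items())}
print(cw_dict)                                      # {0: 3, 1: 2, 2: 3}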
# Path: configs/data_configs.py
def get_dataset_class(dataset_name):
"""Return the algorithm class with the given name."""
if dataset_name not in globals():
raise NotImplementedError("Dataset not found: {}".format(dataset_name))
return globals()[dataset_name]
# Path: configs/hparams.py
def get_hparams_class(dataset_name):
"""Return the algorithm class with the given name."""
if dataset_name not in globals():
raise NotImplementedError("Algorithm not found: {}".format(dataset_name))
return globals()[dataset_name]
# Path: utils.py
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
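AverageMeter keeps a running, optionally weighted mean; the trainer below uses it to accumulate per-batch losses into an epoch average. A short usage sketch with the class defined above:

meter = AverageMeter()
for batch_loss, batch_size in [(0.9, 32), (0.7, 32), (0.5, 16)]:
    meter.update(batch_loss, n=batch_size)   # weight each value by its batch size
print(round(meter.avg, 4))                   # weighted mean over 80 samples -> 0.74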
# Path: utils.py
def to_device(input, device):
if torch.is_tensor(input):
return input.to(device=device)
elif isinstance(input, str):
return input
elif isinstance(input, collections.abc.Mapping):
return {k: to_device(sample, device=device) for k, sample in input.items()}
elif isinstance(input, collections.abc.Sequence):
return [to_device(sample, device=device) for sample in input]
else:
raise TypeError(f"Input must contain tensor, dict or list, found {type(input)}")
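to_device walks nested containers, so a whole batch dictionary can be moved with one call. A usage sketch with the function above, kept on the CPU so it runs anywhere:

import torch

batch = {"samples": torch.randn(4, 1, 16), "labels": torch.tensor([0, 1, 1, 0]), "id": "rec-01"}
moved = to_device(batch, torch.device("cpu"))
print(moved["samples"].device, moved["id"])   # cpu rec-01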
# Path: utils.py
def _save_metrics(pred_labels, true_labels, log_dir, home_path, classes_names):
pred_labels = np.array(pred_labels).astype(int)
true_labels = np.array(true_labels).astype(int)
r = classification_report(true_labels, pred_labels, digits=6, output_dict=True)
df = pd.DataFrame(r)
accuracy = accuracy_score(true_labels, pred_labels)
df["accuracy"] = accuracy
df = df * 100
# save classification report
file_name = "classification_report.xlsx"
report_Save_path = os.path.join(home_path, log_dir, file_name)
df.to_excel(report_Save_path)
# Path: utils.py
def copy_Files(destination):
destination_dir = os.path.join(destination, "MODEL_BACKUP_FILES")
os.makedirs(destination_dir, exist_ok=True)
copy("main.py", os.path.join(destination_dir, "main.py"))
copy("dataloader.py", os.path.join(destination_dir, "dataloader.py"))
copy(f"models.py", os.path.join(destination_dir, f"models.py"))
copy(f"configs/data_configs.py", os.path.join(destination_dir, f"data_configs.py"))
copy(f"configs/hparams.py", os.path.join(destination_dir, f"hparams.py"))
copy(f"trainer.py", os.path.join(destination_dir, f"trainer.py"))
copy("utils.py", os.path.join(destination_dir, "utils.py"))
# Path: utils.py
def _plot_umap(model, data_loader, device, save_dir):
import umap
import umap.plot
from matplotlib.colors import ListedColormap
classes_names = ['N','S','V','F','Q']
font = {'family' : 'Times New Roman',
'weight' : 'bold',
'size' : 17}
plt.rc('font', **font)
with torch.no_grad():
# Source flow
data = data_loader.dataset.x_data.float().to(device)
labels = data_loader.dataset.y_data.view((-1)).long()
out = model[0](data)
features = model[1](out)
if not os.path.exists(os.path.join(save_dir, "umap_plots")):
os.mkdir(os.path.join(save_dir, "umap_plots"))
#cmaps = plt.get_cmap('jet')
model_reducer = umap.UMAP() #n_neighbors=3, min_dist=0.3, metric='correlation', random_state=42)
embedding = model_reducer.fit_transform(features.detach().cpu().numpy())
# Normalize the labels to [0, 1] for colormap
norm_labels = labels / 4.0
# Create a new colormap by extracting the first 5 colors from "Paired"
paired = plt.cm.get_cmap('Paired', 12) # 12 distinct colors
new_colors = [paired(0), paired(1), paired(2), paired(4), paired(6)] # Skip every second color, but take both from the first pair
new_cmap = ListedColormap(new_colors)
print("Plotting UMAP ...")
plt.figure(figsize=(16, 10))
# scatter = plt.scatter(embedding[:, 0], embedding[:, 1], c=labels, s=10, cmap='Spectral')
scatter = plt.scatter(embedding[:, 0], embedding[:, 1], c=norm_labels, cmap=new_cmap, s=15)
handles, _ = scatter.legend_elements(prop='colors')
plt.legend(handles, classes_names, title="Classes")
file_name = "umap_.png"
fig_save_name = os.path.join(save_dir, "umap_plots", file_name)
plt.xticks([])
plt.yticks([])
plt.savefig(fig_save_name, bbox_inches='tight')
plt.close()
# Path: utils.py
def fix_randomness(SEED):
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Path: utils.py
def starting_logs(data_type, exp_log_dir, seed_id):
log_dir = os.path.join(exp_log_dir, "_seed_" + str(seed_id))
os.makedirs(log_dir, exist_ok=True)
log_file_name = os.path.join(log_dir, f"logs_{datetime.now().strftime('%d_%m_%Y_%H_%M_%S')}.log")
logger = _logger(log_file_name)
logger.debug("=" * 45)
logger.debug(f'Dataset: {data_type}')
logger.debug("=" * 45)
logger.debug(f'Seed: {seed_id}')
logger.debug("=" * 45)
return logger, log_dir
# Path: utils.py
def save_checkpoint(home_path, model, dataset, dataset_configs, log_dir, hparams):
save_dict = {
"dataset": dataset,
"configs": dataset_configs.__dict__,
"hparams": dict(hparams),
"model": model[0].state_dict(),
"clf": model[1].state_dict()
}
# save classification report
save_path = os.path.join(home_path, log_dir, "checkpoint.pt")
torch.save(save_dict, save_path)
# Path: utils.py
def _calc_metrics(pred_labels, true_labels, classes_names):
pred_labels = np.array(pred_labels).astype(int)
true_labels = np.array(true_labels).astype(int)
r = classification_report(true_labels, pred_labels, target_names=classes_names, digits=6, output_dict=True)
accuracy = accuracy_score(true_labels, pred_labels)
return accuracy * 100, r["macro avg"]["f1-score"] * 100
# Path: trainer.py
import torch
import torch.nn.functional as F
import os
import collections
import numpy as np
import warnings
import sklearn.exceptions
from models import ecgTransForm
from dataloader import data_generator
from configs.data_configs import get_dataset_class
from configs.hparams import get_hparams_class
from utils import AverageMeter, to_device, _save_metrics, copy_Files, _plot_umap
from utils import fix_randomness, starting_logs, save_checkpoint, _calc_metrics
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
class trainer(object):
def __init__(self, args):
# dataset parameters
self.dataset = args.dataset
self.seed_id = args.seed_id
self.device = torch.device(args.device)
# Exp Description
self.run_description = args.run_description
self.experiment_description = args.experiment_description
# paths
self.home_path = os.getcwd()
self.save_dir = os.path.join(os.getcwd(), "experiments_logs")
self.exp_log_dir = os.path.join(self.save_dir, self.experiment_description, self.run_description)
os.makedirs(self.exp_log_dir, exist_ok=True)
self.data_path = args.data_path
# Specify runs
self.num_runs = args.num_runs
# get dataset and base model configs
self.dataset_configs, self.hparams_class = self.get_configs()
# Specify hparams
self.hparams = self.hparams_class.train_params
def get_configs(self):
dataset_class = get_dataset_class(self.dataset)
hparams_class = get_hparams_class("supervised")
return dataset_class(), hparams_class()
def load_data(self, data_type):
self.train_dl, self.val_dl, self.test_dl, self.cw_dict = \
data_generator(self.data_path, data_type, self.hparams)
def calc_results_per_run(self):
acc, f1 = _calc_metrics(self.pred_labels, self.true_labels, self.dataset_configs.class_names)
return acc, f1
def train(self):
copy_Files(self.exp_log_dir) # save a copy of training files
self.metrics = {'accuracy': [], 'f1_score': []}
# fixing random seed
fix_randomness(int(self.seed_id))
# Logging
self.logger, self.scenario_log_dir = starting_logs(self.dataset, self.exp_log_dir, self.seed_id)
self.logger.debug(self.hparams)
# Load data
self.load_data(self.dataset)
model = ecgTransForm(configs=self.dataset_configs, hparams=self.hparams)
model.to(self.device)
# Average meters
loss_avg_meters = collections.defaultdict(lambda: AverageMeter())
self.optimizer = torch.optim.Adam(
model.parameters(),
lr=self.hparams["learning_rate"],
weight_decay=self.hparams["weight_decay"],
betas=(0.9, 0.99)
)
self.cross_entropy = torch.nn.CrossEntropyLoss(weight=torch.tensor(list(self.cw_dict.values())).float().to(self.device))
best_acc = 0
best_f1 = 0
# training..
for epoch in range(1, self.hparams["num_epochs"] + 1):
model.train()
for step, batches in enumerate(self.train_dl):
batches = to_device(batches, self.device)
data = batches['samples'].float()
labels = batches['labels'].long()
# ====== Source =====================
self.optimizer.zero_grad()
# Src original features
logits = model(data)
# Cross-Entropy loss
x_ent_loss = self.cross_entropy(logits, labels)
x_ent_loss.backward()
| self.optimizer.step() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Infotrend-Inc/OpenAI_WebUI
# Path: OpenAI_GPT.py
class OAI_GPT:
def __init__(self, apikey, save_location, models_list):
self.last_gpt_query = 'last_gpt_query'
self.apikey = apikey
self.save_location = save_location
self.models_supported = models_list
self.set_parameters(models_list)
#####
# https://platform.openai.com/docs/models/continuous-model-upgrades
def set_parameters(self, models_list):
models = {}
model_help = ""
all = {
"gpt-3.5-turbo":
{
"label": "Most capable GPT-3.5 model and optimized for chat. Will be updated with OpenAI's latest model iteration. For many basic tasks, the difference between GPT-4 and GPT-3.5 models is not significant. However, in more complex reasoning situations, GPT-4 is much more capable.",
"max_token": 4000,
"data": "Up to Sep 2021 (as of 20231108)"
},
"gpt-3.5-turbo-16k":
{
"label": "Same capabilities as the standard gpt-3.5-turbo model but with 4 times the context.",
"max_token": 16000,
"data": "Up to Sep 2021 (as of 20231108)"
},
"gpt-3.5-turbo-1106":
{
"label": "The latest GPT-3.5 Turbo model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Returns a maximum of 4,096 output tokens.",
"max_token": 4000,
"data": "Up to Sep 2021 (as of 20231118)"
},
"gpt-4":
{
"label": "More capable than any GPT-3.5 model, able to do more complex tasks, and optimized for chat.",
"max_token": 8192,
"data": "Up to Sep 2021 (as of 20231108)"
},
"gpt-4-32k":
{
"label": "Same capabilities as the base gpt-4 mode but with 4x the context length.",
"max_token": 32768,
"data": "Up to Sep 2021 (as of 20231108)"
},
"gpt-4-1106-preview":
{
"label": "The latest GPT-4 model (with 128k tokens) with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Returns a maximum of 4,096 output tokens. This preview model is not yet suited for production traffic.",
"max_token": 4096,
"data": "Up to Apr 2023 (as of 20231108)"
}
}
s_models_list = models_list.split(",")
known_models = list(all.keys())
for t_model in s_models_list:
model = t_model.strip()
if model in all:
models[model] = all[model]
else:
st.error(f"Unknown model: {model} | Known models: {known_models}")
cf.error_exit(f"Unknown model {model}")
model_help = ""
for key in models:
model_help += key + ":\n"
model_help += models[key]["label"] + "\n"
model_help += "max_token: " + str(models[key]["max_token"]) + "\n"
model_help += "data: " + models[key]["data"] + "\n\n"
self.models = models
self.model_help = model_help
self.gpt_presets = {
"None": {
"pre": "",
"post": "",
"kwargs": {}
},
"Keywords": {
"pre": "Extract keywords from this text: ",
"post": "",
"kwargs": {"top_p": 1.0, "frequency_penalty": 0.8, "presence_penalty": 0.0}
},
"Summarization": {
"pre": "",
"post": "Tl;dr",
"kwargs": {"top_p": 1.0, "frequency_penalty": 0.0, "presence_penalty": 1}
}
}
self.gpt_presets_help = "None: regular, no additonal parameters\n\nKeywords: Extract keywords from a block of text. At a lower temperature it picks keywords from the text. At a higher temperature it will generate related keywords which can be helpful for creating search indexes.\n\nSummarization: Summarize text."
self.gpt_roles = {
'user': 'help instruct the assistant',
'system': 'helps set the behavior of the assistant (ex: "You are a helpful assistant. You also like to speak in the words of Shakespeare. Incorporate that into your responses.")',
'assistant': 'helps set the past conversations. This is relevant when you had a chat that went over the maximum number of tokens and need to start a new one: give the chat history some fresh context'
}
self.gpt_roles_help = ""
for key in self.gpt_roles:
self.gpt_roles_help += key + ":\n" + self.gpt_roles[key] + "\n\n"
#####
def get_rf_role_prompt_response(self, run_file):
run_json = cf.get_run_file(run_file)
role = ""
if 'role' in run_json:
role = run_json['role']
prompt = ""
if 'prompt' in run_json:
prompt = run_json['prompt']
response = ""
if 'response' in run_json:
response = run_json['response']
return (role, prompt, response)
#####
def get_dest_dir(self):
return os.path.join(self.save_location, "gpt", cf.get_timeUTC())
#####
def format_rpr(self, role, prompt, response):
return (f"\n\n--------------------------\n\n -- role: {role}\n\n -- prompt: {prompt}\n\n -- response: {response }\n\n")
#####
def get_chat_history(self, run_file):
run_json = cf.get_run_file(run_file)
if 'last_run_file' in run_json:
(role, prompt, response) = self.get_rf_role_prompt_response(run_file)
txt = self.format_rpr(role, prompt, response)
last_run_file = run_json['last_run_file']
if cf.isNotBlank(last_run_file):
tmp = self.get_chat_history(last_run_file)
return (tmp + txt)
else:
return (txt)
else: # last one, return the formatted text
(role, prompt, response) = self.get_rf_role_prompt_response(run_file)
return(self.format_rpr(role, prompt, response))
#####
def chatgpt_it(self, model_engine, prompt, max_tokens, temperature, dest_dir, clear_chat, role, **kwargs):
err = cf.check_existing_dir_w(dest_dir)
if cf.isNotBlank(err):
st.error(f"While checking {dest_dir}: {err}")
cf.error_exit(err)
messages = []
last_run_file = None
if not clear_chat:
# Obtain previous messages
if self.last_gpt_query in st.session_state:
run_file = st.session_state[self.last_gpt_query]
old_run_json = cf.get_run_file(run_file)
if 'messages' in old_run_json:
messages = old_run_json['messages']
last_run_file = run_file
messages.append({ 'role': role, 'content': prompt })
err, response = gpt_call(self.apikey, messages, model_engine, max_tokens, temperature, **kwargs)
if cf.isNotBlank(err):
return err, ""
runid = cf.get_runid()
run_file = f"{dest_dir}/run---{runid}.json"
run_json = {
"role": role,
"prompt": prompt,
"response": response,
'messages': messages,
'last_run_file': last_run_file,
}
with open(run_file, 'w') as f:
json.dump(run_json, f, indent=4)
return "", run_file
#####
def estimate_tokens(self, txt):
# https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
word_count = len(txt.split())
char_count = len(txt)
return max(int(word_count / 0.75), int(char_count / 4.00))
#####
def set_ui(self):
st.sidebar.empty()
with st.sidebar:
st.text("Please check the ? for help")
model = st.selectbox("model", options=list(self.models.keys()), index=0, key="model", help=self.model_help)
m_token = self.models[model]['max_token']
role = st.selectbox("Role", options=self.gpt_roles, index=0, key="input_role", help = "Role of the input text\n\n" + self.gpt_roles_help)
clear_chat = st_toggle_switch(label="Clear chat history for next query", default_value=False, label_after=False, key="clear_chat")
max_tokens = st.slider('max_tokens', 0, m_token, 1000, 100, "%i", "max_tokens", "The maximum number of tokens to generate in the completion. The token count of your prompt plus max_tokens cannot exceed the model\'s context length.")
temperature = st.slider('temperature', 0.0, 1.0, 0.5, 0.01, "%0.2f", "temperature", "The temperature of the model. Higher temperature results in more surprising text.")
presets = st.selectbox("Preset", options=list(self.gpt_presets.keys()), index=0, key="presets", help=self.gpt_presets_help)
show_tooltip = st_toggle_switch(label="Show Tips", key="show_tips", default_value=True, label_after=False)
if show_tooltip:
stoggle('Tips', 'GPT provides a simple but powerful interface to any models. You input some text as a prompt, and the model will generate a text completion that attempts to match whatever context or pattern you gave it:<br>- The tool works on text to: answer questions, provide definitions, translate, summarize, and analyze sentiments.<br>- Keep your prompts clear and specific. The tool works best when it has a clear understanding of what you\'re asking it, so try to avoid vague or open-ended prompts.<br>- Use complete sentences and provide context or background information as needed.<br>- Some presets are available in the sidebar, check their details for more information.<br>A few example prompts (to use with "None" preset):<br>- Create a list of 8 questions for a data science interview<br>- Generate an outline for a blog post on MFT<br>- Translate "bonjour comment allez vous" in 1. English 2. German 3. Japanese<br>- write python code to display with an image selector from a local directory using OpenCV<br>- Write a creative ad and find a name for a container to run machine learning and computer vision algorithms by providing access to many common ML frameworks<br>- some models support "Chat" conversations. If you see the "Clear Chat" button, this will be one such model. They also support different max tokens, so adapt accordingly. The "Clear Chat" is here to allow you to start a new "Chat". Chat models can be given writing styles using the "system" "role"<br>More examples and hints can be found at https://platform.openai.com/examples')
prompt_value=f"GPT ({model}) Input"
prompt_value += f" (role: {role})"
prompt_value += f" [max_tokens: {max_tokens} | temperature: {temperature} | preset: {presets}]"
prompt = st.empty().text_area(prompt_value, "", placeholder="Enter your prompt", key="input")
if st.button("Request Answer", key="request_answer"):
if cf.isBlank(prompt) or len(prompt) < 10:
st.error("Please provide a prompt of at least 10 characters before requesting an answer", icon="✋")
return ()
prompt = self.gpt_presets[presets]["pre"] + prompt + self.gpt_presets[presets]["post"]
prompt_token_count = self.estimate_tokens(prompt)
requested_token_count = prompt_token_count + max_tokens
used_max_tokens = 0
if requested_token_count > self.models[model]["max_token"]:
used_max_tokens = self.models[model]["max_token"] - prompt_token_count
if used_max_tokens < 0:
st.error("You have exceeded the maximum number of tokens allowed by the model", icon="✋")
else:
st.warning("You requested %i tokens, but the model can only generate %i tokens. Requesting at max %i tokens." % (requested_token_count, self.models[model]["max_token"], used_max_tokens), icon="❌")
else:
used_max_tokens = max_tokens
if used_max_tokens > 0:
gpt_dest_dir = self.get_dest_dir()
cf.make_wdir_error(gpt_dest_dir)
with st.spinner(f"Asking OpenAI ({model} for {used_max_tokens} tokens with temperature {temperature}. Prompt est. tokens : {prompt_token_count})"):
err, run_file = self.chatgpt_it(model, prompt, used_max_tokens, temperature, gpt_dest_dir, clear_chat, role, **self.gpt_presets[presets]["kwargs"])
if cf.isNotBlank(err):
st.error(err)
if cf.isNotBlank(run_file):
st.session_state['last_gpt_query'] = run_file
st.toast("Done")
if self.last_gpt_query in st.session_state:
run_file = st.session_state[self.last_gpt_query]
run_json = cf.get_run_file(run_file)
prompt = run_json["prompt"]
response = run_json["response"]
messages = []
if 'messages' in run_json:
messages = run_json["messages"]
stoggle('Original Prompt', prompt)
chat_history = ""
if len(messages) > 0:
chat_history = self.get_chat_history(run_file)
stoggle('Chat History', chat_history)
option_list = ('Text (no wordwrap)', 'Text (wordwrap, may cause some visual inconsistencies)',
'Code (automatic highlighting for supported languages)')
option = st.selectbox('Display mode:', option_list)
if option == option_list[0]:
st.text(response)
elif option == option_list[1]:
st.markdown(response)
elif option == option_list[2]:
st.code(response)
else:
st.error("Unknown display mode")
query_output = prompt + "\n\n--------------------------\n\n" + response
if len(messages) > 1:
col1, col2, col3 = st.columns(3)
col1.download_button(label="Download Latest Result", data=response)
col2.download_button(label="Download Latest Query+Result", data=query_output)
col3.download_button(label="Download Chat Query+Result", data=chat_history)
else:
col1, col2 = st.columns(2)
col1.download_button(label="Download Result", data=response)
col2.download_button(label="Download Query+Result", data=query_output)
# Path: OpenAI_DallE.py
class OAI_DallE:
def __init__(self, apikey, save_location, models_list):
self.last_dalle_query = 'last_dalle_query'
self.apikey = apikey
self.save_location = save_location
self.models_supported = models_list
self.set_parameters(models_list)
self.dalle_modes = {
"Image": "The image generations endpoint allows you to create an original image given a text prompt. Generated images and maximum number of requested images depends on the model selected. Smaller sizes are faster to generate."
}
self.dalle_help = ""
for key in self.dalle_modes:
self.dalle_help += key + ":\n"
self.dalle_help += self.dalle_modes[key] + "\n"
#####
def set_parameters(self, models_list):
models = {}
model_help = ""
all = {
"dall-e-2":
{
"label": "The previous DALL·E model released in Nov 2022. The maximum prompt length is 1000 characters.",
"image_size": ["256x256", "512x512", "1024x1024"]
},
"dall-e-3":
{
"label": "The latest DALL·E model released in Nov 2023. The maximum prompt length is 4000 characters.",
"image_size": ["1024x1024", "1024x1792", "1792x1024"]
}
}
s_models_list = models_list.split(",")
known_models = list(all.keys())
for t_model in s_models_list:
model = t_model.strip()
if model in all:
models[model] = all[model]
else:
st.error(f"Unknown model: [{model}] | Known models: {known_models}")
cf.error_exit(f"Unknown model {model}")
model_help = ""
for key in models:
model_help += key + ":\n"
model_help += models[key]["label"] + "\n"
model_help += "image_size: " + str(models[key]["image_size"]) + "\n"
self.models = models
self.model_help = model_help
#####
def get_dest_dir(self):
request_time = datetime.today().isoformat()
return os.path.join(self.save_location, "dalle", request_time)
#####
def dalle_it(self, model, prompt, img_size, img_count, dest_dir, **kwargs):
err = cf.check_existing_dir_w(dest_dir)
if cf.isNotBlank(err):
st.error(f"While checking {dest_dir}: {err}")
cf.error_exit(err)
err, response = dalle_call(self.apikey, model, prompt, img_size, img_count, **kwargs)
if cf.isNotBlank(err):
return err, ""
info_placeholder = st.empty()
all_images = []
for i in range(img_count):
image_name = f"{dest_dir}/{i + 1}.png"
image_url = response.data[i].url
info_placeholder.info(f"Downloading result {i + 1} as {image_name}")
img_data = requests.get(image_url).content
with open(image_name, 'wb') as handler:
handler.write(img_data)
all_images.append(image_name)
info_placeholder.empty()
runid = cf.get_runid()
run_file = f"{dest_dir}/run---{runid}.json"
run_json = {
"prompt": prompt,
"images": all_images,
}
with open(run_file, 'w') as f:
json.dump(run_json, f, indent=4)
return "", run_file
#####
def display_dalle_images(self, prompt, all_images):
img = image_select("Prompt: " + prompt, all_images, use_container_width=False)
st.image(img)
path = pathlib.PurePath(img)
wdir = path.parent.name
wfile = path.name
dfile = f"{wdir}-{wfile}"
st.download_button("Download Selected", data=open(img, 'rb').read(), file_name=dfile, mime="image/png", key="dalle_download_button")
#####
def set_ui(self):
st.sidebar.empty()
with st.sidebar:
st.text("Please check the ? for help")
mode = st.selectbox("mode", options=list(self.dalle_modes.keys()), index=0, key="dalle_mode", help=self.dalle_help)
model = st.selectbox("model", options=list(self.models.keys()), index=0, key="model", help=self.model_help)
model_image_size = self.models[model]["image_size"]
img_size = st.selectbox("image size", options=model_image_size, index=0, key="dalle_image_size",
help="Smaller sizes are faster to generate.")
if model == "dall-e-2":
img_count = st.number_input("number of images", min_value=1, max_value=10, value=1, step=1, key="dalle_img_count",
help="Number of images to generate.")
else:
img_count = 1
kwargs = {}
if model == "dall-e-3":
quality = st.selectbox("quality", options=["standard", "hd"], index=0, key="dalle_quality", help="The quality of the image that will be generated. hd creates images with finer details and greater consistency across the image.")
style = st.selectbox("style", options=["vivid", "natural"], index=0, key="dalle_style", help="The style of the generated images. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images.")
kwargs = {"quality": quality, "style": style}
show_tooltip = st_toggle_switch(label="Show Tips", key="show_tips", default_value=True, label_after=False)
if show_tooltip:
stoggle(
'Tips',
'DALL·E is an AI system that creates realistic images and art from a description in natural language.<br>- The more detailed the description, the more likely you are to get the result that you or your end user want'
)
prompt_value=f"DallE {model} Input [image size: {img_size} | image count: {img_count} | Extra: {kwargs}]"
prompt = st.empty().text_area(prompt_value, "", placeholder="Enter your prompt", key="dalle_input")
if st.button("Submit Request", key="dalle_request_answer"):
if cf.isBlank(prompt) or len(prompt) < 10:
st.error("Please provide a prompt of at least 10 characters before requesting an answer", icon="✋")
return ()
dalle_dest_dir = self.get_dest_dir()
cf.make_wdir_error(dalle_dest_dir)
with st.spinner(f"Asking OpenAI for a response..."):
err, run_file = self.dalle_it(model, prompt, img_size, img_count, dalle_dest_dir, **kwargs)
if cf.isNotBlank(err):
st.error(err)
if cf.isNotBlank(run_file):
st.session_state['last_dalle_query'] = run_file
st.toast("Done")
if self.last_dalle_query in st.session_state:
run_file = st.session_state[self.last_dalle_query]
run_json = cf.get_run_file(run_file)
self.display_dalle_images(run_json['prompt'], run_json['images'])
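dalle_it persists each result by fetching the returned image URL and writing the raw bytes, then records the file list in a small run JSON. A minimal standalone sketch of that save step; the URLs, paths and timeout here are placeholders, not values from the repository:

import json
import requests

def save_images(image_urls, dest_dir="."):
    saved = []
    for i, url in enumerate(image_urls, start=1):
        path = f"{dest_dir}/{i}.png"
        with open(path, "wb") as handler:
            handler.write(requests.get(url, timeout=30).content)  # raw PNG bytes
        saved.append(path)
    with open(f"{dest_dir}/run.json", "w") as f:
        json.dump({"images": saved}, f, indent=4)
    return saved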
# Path: OpenAI_WebUI.py
import streamlit as st
import extra_streamlit_components as stx
import re
import os.path
import common_functions as cf
from OpenAI_GPT import OAI_GPT
from OpenAI_DallE import OAI_DallE
from dotenv import load_dotenv
from datetime import datetime
#!/usr/bin/env python3
# Based on
# https://platform.openai.com/docs/quickstart/build-your-application
# https://github.com/openai/openai-python
#####
iti_version="0.9.1"
st.set_page_config(page_title=f"OpenAI API WebUI ({iti_version})", page_icon="🫥", layout="wide", initial_sidebar_state="expanded", menu_items={'Get Help': 'https://github.com/Infotrend-Inc/OpenAI_WebUI', 'About': f"# OpenAI WebUI ({iti_version})\n Brought to you by [Infotrend Inc.](https://www.infotrend.com/)"})
#####
def main():
err = cf.check_file_r(".env", "Environment file")
if cf.isBlank(err):
load_dotenv()
# If the file is not present, hopefully the variable was set in the Docker environment
apikey = ''
if 'OPENAI_API_KEY' in os.environ:
apikey = os.environ.get('OPENAI_API_KEY')
if cf.isBlank(apikey):
st.error(f"Could not find the OPENAI_API_KEY environment variable")
cf.error_exit(f"Could not find the OPENAI_API_KEY environment variable")
save_location = ""
if 'OAIWUI_SAVEDIR' in os.environ:
save_location = os.environ.get('OAIWUI_SAVEDIR')
if cf.isBlank(save_location):
st.error(f"Could not find the OAIWUI_SAVEDIR environment variable")
cf.error_exit("Could not find the OAIWUI_SAVEDIR environment variable")
err = cf.check_existing_dir_w(save_location, "OAIWUI_SAVEDIR directory")
if cf.isNotBlank(err):
st.error(f"While ching OAIWUI_SAVEDIR: {err}")
cf.error_exit(f"{err}")
gpt_models = ""
if 'OAIWUI_GPT_MODELS' in os.environ:
gpt_models = os.environ.get('OAIWUI_GPT_MODELS')
else:
st.error(f"Could not find the OAIWUI_GPT_MODELS environment variable")
cf.error_exit("Could not find the OAIWUI_GPT_MODELS environment variable")
if cf.isBlank(gpt_models):
st.error(f"OAIWUI_GPT_MODELS environment variable is empty")
cf.error_exit("OAIWUI_GPT_MODELS environment variable is empty")
dalle_models = ""
if 'OAIWUI_DALLE_MODELS' in os.environ:
dalle_models = os.environ.get('OAIWUI_DALLE_MODELS')
else:
st.error(f"Could not find the OAIWUI_DALLE_MODELS environment variable")
cf.error_exit("Could not find the OAIWUI_DALLE_MODELS environment variable")
if cf.isBlank(dalle_models):
st.error(f"OAIWUI_DALLE_MODELS environment variable is empty")
cf.error_exit("OAIWUI_DALLE_MODELS environment variable is empty")
username = ""
if 'OAIWUI_USERNAME' in os.environ:
username = os.environ.get('OAIWUI_USERNAME')
if cf.isBlank(username):
st.warning(f"OAIWUI_USERNAME provided but empty, will ask for username")
else:
| st.session_state['username'] = username |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: daily-demos/ai-meeting-assistant
# Path: server/config.py
class BotConfig:
_openai_api_key: str = None
_openai_model_name: str = None
_log_dir_path: str = None
_daily_room_url: str = None
_daily_meeting_token: str = None
def __init__(self,
openai_api_key: str,
openai_model_name: str,
daily_room_url: str = None,
daily_meeting_token: str = None,
log_dir_path: str = None):
self._openai_api_key = openai_api_key
self._openai_model_name = openai_model_name
self._log_dir_path = log_dir_path
self._daily_room_url = daily_room_url
self._daily_meeting_token = daily_meeting_token
@property
def openai_model_name(self) -> str:
return self._openai_model_name
@property
def openai_api_key(self) -> str:
return self._openai_api_key
@property
def log_dir_path(self) -> str:
return self._log_dir_path
@property
def daily_room_url(self) -> str:
return self._daily_room_url
@property
def daily_meeting_token(self) -> str:
return self._daily_meeting_token
def get_log_file_path(self, room_name: str) -> str | None:
"""Returns the log file for the given room name"""
if not self.log_dir_path:
return None
return os.path.join(self.log_dir_path, f"{room_name}.log")
def ensure_dirs(self):
"""Creates required file directories if they do not already exist."""
if self.log_dir_path:
ensure_dir(self.log_dir_path)
# Path: server/config.py
def get_headless_config() -> BotConfig:
dotenv_path = join(dirname(dirname(abspath(__file__))), '.env')
load_dotenv(dotenv_path)
parser = argparse.ArgumentParser(description='Start a session.')
parser.add_argument(
'--room_url',
type=str,
default=os.environ.get('ROOM_URL'),
help='URL of the room')
parser.add_argument(
'--oai_api_key',
type=str,
default=os.environ.get('OPENAI_API_KEY'),
help='OpenAI API key')
parser.add_argument(
'--oai_model_name',
type=str,
default=os.environ.get('OPENAI_MODEL_NAME'),
help='OpenAI API URL')
parser.add_argument(
'--daily_meeting_token',
type=str,
default=None,
help='Daily meeting token')
parser.add_argument(
'--log_dir_name',
type=str,
default=None,
help='Log dir name')
args = parser.parse_args()
ldn = args.log_dir_name
ldp = None
if ldn:
ldp = os.path.abspath(ldn)
return BotConfig(args.oai_api_key, args.oai_model_name,
args.room_url, args.daily_meeting_token, ldp)
# Path: server/llm/openai_assistant.py
class OpenAIAssistant(Assistant):
"""Class that implements assistant features using the OpenAI API"""
_client: OpenAI = None
_oai_assistant_id: int = None
_oai_summary_thread_id: int = None
_model_name: str = None
_logger: logging.Logger = None
# For now, just store context in memory.
_raw_context: deque([ChatCompletionMessageParam]) = None
_clean_transcript: str = None
_clean_transcript_running: bool = False
_summary_context: str = None
# Process up to 25 context items at a time.
_transcript_batch_size: int = 25
_default_transcript_prompt = ChatCompletionSystemMessageParam(content="""
Using the exact transcript provided in the previous messages, convert it into a cleaned-up, paragraphed format. It is crucial that you strictly adhere to the content of the provided transcript without adding or modifying any of the original dialogue. Your tasks are to:
1. Correct punctuation and spelling mistakes.
2. Merge broken sentences into complete ones.
3. Remove timestamps and transcript types.
4. Clearly indicate the speaker's name at the beginning of their dialogue.
Do not add any new content or dialogue that was not present in the original transcript. The focus is on cleaning and reformatting the existing content for clarity and readability.
""",
role="system")
_default_prompt = """
Primary Instruction:
Based on the provided meeting transcripts, please create a concise summary.
Your summary should include:
1. Key discussion points.
2. Decisions made.
3. Action items assigned.
Keep the summary within six sentences, ensuring it captures the essence of the conversation. Structure it in clear, digestible parts for easy understanding. Rely solely on information from the transcript; do not infer or add information not explicitly mentioned. Exclude any square brackets, tags, or timestamps from the summary. Instead of re-parsing the entire context, use previous summaries you've generated to inform the completion of each new summary. Each summary should be holistic and represent the entire call.
"""
def __init__(self, api_key: str, model_name: str = None,
logger: logging.Logger = None):
if not api_key:
raise Exception("OpenAI API key not provided, but required.")
self._raw_context = deque()
self._summary_context = ""
self._clean_transcript = ""
self._logger = logger
if not model_name:
model_name = "gpt-4-1106-preview"
self._model_name = model_name
self._client = OpenAI(
api_key=api_key,
)
self._oai_assistant_id = self.get_or_create_assistant(model_name)
def get_or_create_assistant(self, model_name) -> str:
"""Gets or creates an OpenAI assistant"""
all_assistants = self._client.beta.assistants.list()
for assistant in all_assistants.data:
if assistant.name == _assistant_name and assistant.instructions == self._default_prompt:
return assistant.id
return self._client.beta.assistants.create(name=_assistant_name, description="Daily meeting summary assistant",
instructions=self._default_prompt,
model=model_name).id
def destroy(self):
"""Destroys the assistant and relevant resources"""
self._logger.info(
"Destroying thread (%s) and assistant (%s)",
self._oai_summary_thread_id,
self._oai_assistant_id)
bc = self._client.beta
if self._oai_summary_thread_id:
bc.threads.delete(self._oai_summary_thread_id)
if self._oai_assistant_id:
bc.assistants.delete(self._oai_assistant_id)
def register_new_context(self, new_text: str, metadata: list[str] = None):
"""Registers new context (usually a transcription line)."""
content = self._compile_ctx_content(new_text, metadata)
user_msg = ChatCompletionUserMessageParam(content=content, role="user")
self._raw_context.append(user_msg)
def get_clean_transcript(self) -> str:
"""Returns latest clean transcript."""
return self._clean_transcript
async def cleanup_transcript(self) -> str:
"""Cleans up transcript from raw context."""
if self._clean_transcript_running:
raise Exception("Clean transcript process already running")
# Set this bool to ensure only one cleanup process
# is running at a time.
self._clean_transcript_running = True
if len(self._raw_context) == 0:
self._clean_transcript_running = False
raise NoContextError()
if self._oai_summary_thread_id:
active_runs = self._client.beta.threads.runs.list(
self._oai_summary_thread_id)
if len(active_runs.data) > 0:
self._clean_transcript_running = False
active_statuses = ["in-progress"]
for run in active_runs.data:
if run.status in active_statuses:
self._logger.info(
"Active run, won't clean transcript: %s (%s)", run, run.status)
return
# How many transcript lines to process
to_fetch = self._transcript_batch_size
to_process = []
ctx = self._raw_context
# Fetch the next batch of transcript lines
while to_fetch > 0 and ctx:
next_line = ctx.popleft()
to_process.append(next_line)
# If we're at the end of the batch size but did not
# get what appears to be a full sentence, just keep going.
if to_fetch == 1 and "." not in next_line.content:
continue
to_fetch -= 1
messages = to_process + [self._default_transcript_prompt]
try:
loop = asyncio.get_event_loop()
future = loop.run_in_executor(
None, self._make_openai_request, messages)
res = await future
self._clean_transcript += f"\n\n{res}"
# Create a new OpenAI summary thread if it does not yet exist.
if not self._oai_summary_thread_id:
self._create_summary_thread()
# Append new message with this batch of cleaned-up transcript to
# thread
self._client.beta.threads.messages.create(
self._oai_summary_thread_id, content=res, role="user")
self._clean_transcript_running = False
except Exception as e:
# Re-insert failed items into the queue,
# to make sure they do not get lost on next attempt.
for item in reversed(to_process):
self._raw_context.appendleft(item)
self._clean_transcript_running = False
raise Exception(f"Failed to query OpenAI: {e}") from e
def _create_summary_thread(self):
"""Creates a new OpenAI thread to store the summary context in"""
thread = self._client.beta.threads.create()
self._oai_summary_thread_id = thread.id
async def query(self, custom_query: str = None) -> str:
"""Submits a query to OpenAI with the stored context if one is provided.
If a query is not provided, uses the default."""
if not self._oai_summary_thread_id:
raise NoContextError()
try:
loop = asyncio.get_event_loop()
future: asyncio.Future = None
if not custom_query:
future = loop.run_in_executor(
None, self._make_openai_thread_request, self._oai_summary_thread_id)
else:
future = loop.run_in_executor(
None, self._make_openai_request, [
ChatCompletionUserMessageParam(
content=self._clean_transcript, role="user"),
ChatCompletionSystemMessageParam(content=custom_query, role="system")])
res = await future
return res
except Exception as e:
if "No assistant found" in str(e):
self._oai_assistant_id = self.get_or_create_assistant(self._model_name)
return await self.query(custom_query)
raise Exception(f"Failed to query OpenAI thread: {e}") from e
def _compile_ctx_content(self, new_text: str,
metadata: list[str] = None) -> str:
"""Compiles context content from the provided text and metadata."""
content = ""
if metadata:
content += f"[{' | '.join(metadata)}] "
content += new_text
return content
def _make_openai_request(
self, messages: list[ChatCompletionMessageParam]) -> str:
"""Makes a chat completion request to OpenAI and returns the response."""
res = self._client.chat.completions.create(
model=self._model_name,
messages=messages,
temperature=0,
)
for choice in res.choices:
reason = choice.finish_reason
if reason == "stop" or reason == "length":
answer = choice.message.content
return answer
raise Exception(
"No usable choice found in OpenAI response: %s",
res.choices)
def _make_openai_thread_request(
self, thread_id: list) -> str:
"""Creates a thread run and returns the response."""
threads = self._client.beta.threads
run = threads.runs.create(
assistant_id=self._oai_assistant_id,
thread_id=thread_id,
)
while run.status != "completed":
run = threads.runs.retrieve(
thread_id=thread_id,
run_id=run.id
)
messages = threads.messages.list(
thread_id=thread_id,
)
msg_data = messages.data[0]
answer = msg_data.content[0].text.value
return answer
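One detail worth noting in cleanup_transcript is its failure path: the batch popped from the deque is pushed back with appendleft in reverse order, so a failed OpenAI call loses no transcript lines and the original ordering is preserved for the next attempt. The re-queue pattern in isolation, as a tiny sketch:

from collections import deque

queue = deque(["line 1", "line 2", "line 3", "line 4"])
batch = [queue.popleft() for _ in range(2)]        # take a batch to process

try:
    raise RuntimeError("simulated API failure")    # stand-in for the OpenAI call failing
except RuntimeError:
    for item in reversed(batch):                   # re-insert so order is unchanged
        queue.appendleft(item)

print(list(queue))   # ['line 1', 'line 2', 'line 3', 'line 4']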
# Path: server/llm/assistant.py
class Assistant(ABC):
"""Abstract class defining methods that should be implemented by any assistant"""
@abstractmethod
def register_new_context(self, new_text: str,
name: list[str] = None) -> str:
"""Registers new context (usually a transcription line)."""
@abstractmethod
async def query(self, custom_query: str) -> str:
"""Runs a query against the assistant and returns the answer."""
@abstractmethod
def get_clean_transcript(self) -> str:
"""Returns latest clean transcript."""
@abstractmethod
async def cleanup_transcript(self) -> str:
"""Cleans up transcript from raw context."""
@abstractmethod
def destroy(self) -> str:
"""Destroys the assistant."""
# Path: server/llm/assistant.py
class NoContextError(Exception):
"""Raised when a query is made but no context is available"""
def __init__(self):
m = "No context available."
super().__init__(m)
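# A minimal sketch of a concrete Assistant, assuming only the abstract interface
# above. It keeps context in memory and exists purely for illustration; it is not
# the OpenAIAssistant the server actually uses.
class EchoAssistant(Assistant):
    """Toy in-memory assistant implementing the abstract interface above."""

    def __init__(self):
        self._lines = []

    def register_new_context(self, new_text: str, name: list = None) -> str:
        prefix = f"[{' | '.join(name)}] " if name else ""
        self._lines.append(prefix + new_text)
        return self._lines[-1]

    async def query(self, custom_query: str = None) -> str:
        if not self._lines:
            raise NoContextError()
        return f"{custom_query or 'summary'}: {len(self._lines)} lines of context"

    def get_clean_transcript(self) -> str:
        return "\n".join(self._lines)

    async def cleanup_transcript(self) -> str:
        return self.get_clean_transcript()

    def destroy(self) -> str:
        self._lines.clear()
        return "destroyed"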
# Path: server/call/session.py
import asyncio
import atexit
import dataclasses
import json
import logging
import os.path
import sys
import threading
import time
from asyncio import Future
from datetime import datetime
from logging import Logger
from typing import Mapping, Any
from urllib.parse import urlparse
from daily import Daily, EventHandler, CallClient
from server.config import BotConfig, get_headless_config
from server.llm.openai_assistant import OpenAIAssistant
from server.llm.assistant import Assistant, NoContextError
"""Class representing a single meeting happening within a Daily room.
This is responsible for all Daily operations."""
from __future__ import annotations
@dataclasses.dataclass
class Room:
"""Class representing a Daily video call room"""
url: str = None
token: str = None
name: str = None
@dataclasses.dataclass
class Summary:
"""Class representing a Daily meeting summary"""
content: str
retrieved_at: float
class Session(EventHandler):
"""Class representing a single meeting happening within a Daily room."""
_config: BotConfig
_assistant: Assistant
_summary: Summary | None
# Daily-related properties
_id: str | None
_call_client: CallClient | None
_room: Room
# Shutdown-related properties
_is_destroyed: bool
_shutdown_timer: threading.Timer | None = None
def __init__(self, config: BotConfig):
super().__init__()
self._is_destroyed = False
self._config = config
self._summary = None
self._id = None
self._room = self._get_room_config(self._config.daily_room_url)
self._logger = self.create_logger(self._room.name)
self._assistant = OpenAIAssistant(
config.openai_api_key,
config.openai_model_name,
self._logger)
self._logger.info("Initialized session %s", self._room.name)
def start(self):
# Start session on new thread
task = threading.Thread(target=self._run)
task.start()
while not self.is_destroyed:
time.sleep(1)
@property
def room_url(self) -> str:
return self._room.url
@property
def id(self) -> str:
return self._id
@property
def is_destroyed(self) -> bool:
return self._is_destroyed
def _get_room_config(self, room_url: str = None) -> Room:
"""Creates a Daily room and uses it to start a session"""
parsed_url = urlparse(room_url)
room_name = os.path.basename(parsed_url.path)
token = self._config.daily_meeting_token
room = Room(url=room_url, name=room_name, token=token)
return room
def _run(self):
"""Waits for at least one person to join the associated Daily room,
then joins, starts transcription, and begins registering context."""
call_client = CallClient(event_handler=self)
self._call_client = call_client
room = self._room
self._logger.info("Joining Daily room %s", room.url)
call_client.join(
room.url,
room.token,
completion=self.on_joined_meeting)
| async def _generate_clean_transcript(self) -> bool: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: tiendatnguyen-vision/Orbit-symmetrize
# Path: RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operator_base.py
class LinearOperator(nn.Module):
""" Common interface for performing matrix vector products
Many iterative methods (e.g. cg, gmres) do not need to know the
individual entries of a matrix to solve a linear system A*x=b.
Such solvers only require the computation of matrix vector
products, A*v where v is a dense vector. This class serves as
an abstract interface between iterative solvers and matrix-like
objects.
To construct a concrete LinearOperator, either pass appropriate
callables to the constructor of this class, or subclass it.
A subclass must implement either one of the methods ``_matvec``
and ``_matmat``, and the attributes/properties ``shape`` (pair of
integers) and ``dtype`` (may be None). It may call the ``__init__``
on this class to have these attributes validated. Implementing
``_matvec`` automatically implements ``_matmat`` (using a naive
algorithm) and vice-versa.
Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``
to implement the Hermitian adjoint (conjugate transpose). As with
``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or
``_adjoint`` implements the other automatically. Implementing
``_adjoint`` is preferable; ``_rmatvec`` is mostly there for
backwards compatibility.
Parameters
----------
shape : tuple
Matrix dimensions (M, N).
matvec : callable f(v)
Returns A * v.
rmatvec : callable f(v)
Returns A^H * v, where A^H is the conjugate transpose of A.
matmat : callable f(V)
Returns A * V, where V is a dense matrix with dimensions (N, K).
dtype : dtype
Data type of the matrix.
rmatmat : callable f(V)
Returns A^H * V, where V is a dense matrix with dimensions (M, K).
Attributes
----------
args : tuple
For linear operators describing products etc. of other linear
operators, the operands of the binary operation.
ndim : int
Number of dimensions (this is always 2)
See Also
--------
aslinearoperator : Construct LinearOperators
Notes
-----
The user-defined matvec() function must properly handle the case
where v has shape (N,) as well as the (N,1) case. The shape of
the return type is handled internally by LinearOperator.
LinearOperator instances can also be multiplied, added with each
other and exponentiated, all lazily: the result of these operations
is always a new, composite LinearOperator, that defers linear
operations to the original operators and combines the results.
More details regarding how to subclass a LinearOperator and several
examples of concrete LinearOperator instances can be found in the
external project `PyLops <https://pylops.readthedocs.io>`_.
Examples
--------
>>> def mv(v):
... return torch.tensor([2*v[0], 3*v[1]])
...
>>> A = LinearOperator((2,2), matvec=mv)
>>> A
<2x2 _CustomLinearOperator with dtype=float64>
>>> A.matvec(torch.ones(2))
tensor([ 2., 3.])
>>> A * torch.ones(2)
tensor([ 2., 3.])
"""
def __new__(cls, *args, **kwargs):
if cls is LinearOperator:
# Operate as _CustomLinearOperator factory.
return super(LinearOperator, cls).__new__(_CustomLinearOperator)
obj = super(LinearOperator, cls).__new__(cls)
if (type(obj)._matvec == LinearOperator._matvec
and type(obj)._matmat == LinearOperator._matmat):
warnings.warn("LinearOperator subclass should implement"
" at least one of _matvec and _matmat.",
category=RuntimeWarning, stacklevel=2)
return obj
def __init__(self):
super().__init__()
self.ndim = 2
self.dtype = None
self.shape = None
self.device = None
def init(self, dtype, shape, device):
""" Initialize this LinearOperator.
To be called by subclasses. ``dtype`` may be None; ``shape`` should
be convertible to a length-2 tuple.
Called from subclasses at the end of the __init__ routine.
"""
if dtype is None:
dtype = torch.float # force float 32
else:
if not isinstance(dtype, torch.dtype):
dtype = torch_dtype(dtype)
shape = tuple(shape)
if not isshape(shape):
raise ValueError(f"invalid shape {(shape,)} (must be 2-d)")
self.dtype = dtype
self.shape = torch.Size(shape)
self.device = torch_device(device)
def size(self, dim=None):
""" Return the size of this LinearOperator.
This is a synonym for ``shape``.
"""
return self.shape if dim is None else self.shape[dim]
def _matmat(self, V):
""" Default matrix-matrix multiplication handler.
Falls back on the user-defined _matvec method, so defining that will
define matrix multiplication (though in a very suboptimal way).
"""
return torch.hstack([self.matvec(col.reshape(-1, 1)) for col in V.T])
def _matvec(self, v):
""" Default matrix-vector multiplication handler.
If self is a linear operator of shape (M, N), then this method will
be called on a shape (N,) or (N, 1) ndarray, and should return a
shape (M,) or (M, 1) ndarray.
This default implementation falls back on _matmat, so defining that
will define matrix-vector multiplication as well.
"""
return self.matmat(v.reshape(-1, 1))
def matvec(self, v):
""" Matrix-vector multiplication.
Performs the operation y=A*v where A is an MxN linear
operator and v is a column vector or 1-d array.
Parameters
----------
v : {matrix, ndarray}
An array with shape (N,) or (N,1).
Returns
-------
y : {matrix, ndarray}
A matrix or ndarray with shape (M,) or (M,1) depending
on the type and shape of the v argument.
Notes
-----
This matvec wraps the user-specified matvec routine or overridden
_matvec method to ensure that y has the correct shape and type.
"""
M, N = self.shape
if v.shape != (N,) and v.shape != (N, 1):
raise ValueError('dimension mismatch')
y = self._matvec(v)
if v.ndim == 1:
y = y.reshape(M)
elif v.ndim == 2:
y = y.reshape(M, 1)
else:
raise ValueError('invalid shape returned by user-defined matvec()')
return y
def rmatvec(self, v):
""" Adjoint matrix-vector multiplication.
Performs the operation y = A^H * v where A is an MxN linear
operator and v is a column vector or 1-d array.
Parameters
----------
v : {matrix, ndarray}
An array with shape (M,) or (M,1).
Returns
-------
y : {matrix, ndarray}
A matrix or ndarray with shape (N,) or (N,1) depending
on the type and shape of the v argument.
Notes
-----
This rmatvec wraps the user-specified rmatvec routine or overridden
_rmatvec method to ensure that y has the correct shape and type.
"""
M, N = self.shape
if v.shape != (M,) and v.shape != (M, 1):
raise ValueError('dimension mismatch')
y = self._rmatvec(v)
if v.ndim == 1:
y = y.reshape(N)
elif v.ndim == 2:
y = y.reshape(N, 1)
else:
raise ValueError('invalid shape returned by user-defined rmatvec()')
return y
def _rmatvec(self, v):
""" Default implementation of _rmatvec; defers to adjoint. """
if type(self)._adjoint == LinearOperator._adjoint:
# _adjoint not overridden, prevent infinite recursion
raise NotImplementedError
return self.H().matvec(v)
def matmat(self, V):
""" Matrix-matrix multiplication.
Performs the operation y=A*V where A is an MxN linear
operator and V dense N*K matrix or ndarray.
Parameters
----------
V : {matrix, ndarray}
An array with shape (N,K).
Returns
-------
Y : {matrix, ndarray}
A matrix or ndarray with shape (M,K) depending on
the type of the V argument.
Notes
-----
This matmat wraps any user-specified matmat routine or overridden
_matmat method to ensure that y has the correct type.
"""
if V.ndim != 2:
raise ValueError(f'expected 2-d ndarray or matrix, not {V.ndim}-d')
if V.size(0) != self.size(1):
raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')
Y = self._matmat(V)
return Y
def rmatmat(self, V):
""" Adjoint matrix-matrix multiplication.
Performs the operation y = A^H * V where A is an MxN linear
operator and V is a column vector or 1-d array, or 2-d array.
The default implementation defers to the adjoint.
Parameters
----------
V : {matrix, ndarray}
A matrix or 2D array.
Returns
-------
Y : {matrix, ndarray}
A matrix or 2D array depending on the type of the input.
Notes
-----
This rmatmat wraps the user-specified rmatmat routine.
"""
if V.ndim != 2:
raise ValueError(f'expected 2-d matrix, not {V.ndim}-d')
if V.size(0) != self.size(0):
raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')
Y = self._rmatmat(V)
return Y
def _rmatmat(self, V):
""" Default implementation of _rmatmat defers to rmatvec or adjoint. """
if type(self)._adjoint == LinearOperator._adjoint:
return torch.hstack([self.rmatvec(col.reshape(-1, 1)) for col in V.T])
return self.H().matmat(V)
def forward(self, v):
""" Matrix-vector or matrix-matrix multiplication. """
return self*v
def __mul__(self, v):
return self.dot(v)
def dot(self, v):
""" Matrix-matrix or matrix-vector multiplication.
Parameters
----------
v : array_like
1-d or 2-d array, representing a vector or matrix.
Returns
-------
Av : array
1-d or 2-d array (depending on the shape of x) that represents
the result of applying this linear operator on x.
"""
if isinstance(v, LinearOperator):
return _ProductLinearOperator(self, v)
if torch.is_tensor(v):
if v.ndim == 0:
return _ScaledLinearOperator(self, v)
if v.ndim == 1 or v.ndim == 2 and v.size(1) == 1:
return self.matvec(v)
if v.ndim == 2:
return self.matmat(v)
raise ValueError(f'expected 1-d or 2-d array or matrix, got {v}')
def __matmul__(self, other):
if isscalar(other):
raise ValueError("Scalar operands are not allowed, use '*' instead")
return self.__mul__(other)
def __rmatmul__(self, other):
if isscalar(other):
raise ValueError("Scalar operands are not allowed, use '*' instead")
return self.__rmul__(other)
def __rmul__(self, x):
if isscalar(x):
return _ScaledLinearOperator(self, x)
return NotImplemented
def __pow__(self, p):
if isscalar(p):
return _PowerLinearOperator(self, p)
return NotImplemented
def __add__(self, x):
if isinstance(x, LinearOperator):
return _SumLinearOperator(self, x)
if torch.is_tensor(x) and x.ndim == 2:
return _SumLinearOperator(self, Lazy(x))
return NotImplemented
def __radd__(self, x):
return self.__add__(x)
def __neg__(self):
return _ScaledLinearOperator(self, -1)
def __sub__(self, x):
return self.__add__(-x)
def __repr__(self):
M, N = self.shape
if self.dtype is None:
dtype = 'unspecified dtype'
else:
dtype = 'dtype=' + str(self.dtype)
return f'<{M}x{N} {self.__class__.__name__} with {dtype}>'
def adjoint(self):
""" Hermitian adjoint.
Returns the Hermitian adjoint of self, aka the Hermitian
conjugate or Hermitian transpose. For a complex matrix, the
Hermitian adjoint is equal to the conjugate transpose.
Can be abbreviated self.H instead of self.adjoint().
Returns
-------
A_H : LinearOperator
Hermitian adjoint of self.
"""
return self._adjoint()
def H(self):
""" Hermitian adjoint. """
return self.adjoint()
def transpose(self):
""" Transpose this linear operator.
Returns a LinearOperator that represents the transpose of this one.
Can be abbreviated self.T instead of self.transpose().
"""
return self._transpose()
def t(self):
""" Transpose this linear operator. """
return self.transpose()
def _adjoint(self):
""" Default implementation of _adjoint; defers to rmatvec. """
return _AdjointLinearOperator(self)
def _transpose(self):
""" Default implementation of _transpose; defers to rmatvec + conj"""
return _TransposedLinearOperator(self)
def invt(self):
""" Default implementation of inverse transpose; defers to inv + T """
return (self ** -1).transpose()
def to_dense(self):
""" Default implementation of to_dense which produces the dense
matrix corresponding to the given lazy matrix. Defaults to
multiplying by the identity """
return [email protected](self.size(-1), device=self.device)
def to(self, device):
""" Move this linear operator to a new device. """
self.device = torch.empty(0).to(device).device
return self
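# A quick illustration of the lazy composition described in the class docstring:
# scalar multiples and sums of LinearOperators are themselves LinearOperators and
# only touch data when applied. A sketch with arbitrary shapes/values, assuming
# LinearOperator is importable from this module.
import torch

def mv(v):
    return torch.tensor([2 * v[0], 3 * v[1]])

A = LinearOperator((2, 2), matvec=mv)   # factory returns a _CustomLinearOperator
B = 2 * A + A                           # stays lazy: nothing is materialized yet
print(B @ torch.ones(2))                # tensor([6., 9.])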
# Path: RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operator_base.py
class Lazy(LinearOperator):
""" Linear operator with lazy evaluation """
def __init__(self, dense_matrix):
super().__init__()
self.A = dense_matrix
self.init(self.A.dtype, self.A.shape, self.A.device)
def _matmat(self, V):
A, V = device_cast(self.A, V)
A, V = dtype_cast(A, V)
return A@V
def _matvec(self, v):
A, v = device_cast(self.A, v)
A, v = dtype_cast(A, v)
return A@v
def _rmatmat(self, V):
A, V = device_cast(self.A, V)
A, V = dtype_cast(A, V)
return A.t()@V
def _rmatvec(self, v):
A, v = device_cast(self.A, v)
A, v = dtype_cast(A, v)
return A.t()@v
def to_dense(self):
return self.A
def invt(self):
return Lazy(torch.linalg.inv(self.A).t())
def to(self, device):
self.A = self.A.to(device)
self.device = self.A.device
return self
# Path: RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/utils.py
def dtype_cast(A, B):
""" Casts A and B to the same dtype, preferring complex dtypes over real dtypes. """
if A.dtype in (torch.complex64, torch.complex128):
B = B.to(A.dtype)
if B.dtype in (torch.complex64, torch.complex128):
A = A.to(B.dtype)
return A, B
# Path: RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/utils.py
def device_cast(A, B):
""" Casts A and B to the same device, preferring GPU over CPU. """
if A.device.type == 'cuda':
B = B.to(A.device)
if B.device.type == 'cuda':
A = A.to(B.device)
return A, B
# Path: RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/utils.py
def get_device(operators, devices=None):
""" Returns the device of the first operator that has a device attribute. """
if devices is None:
devices = []
for obj in operators:
if obj is not None and hasattr(obj, 'device') and obj.device.type != 'cpu':
return obj.device
return torch.device('cpu')
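# Small usage sketch for the casting helpers above (illustrative values only):
# dtype_cast promotes the real operand to the complex operand's dtype, while
# device_cast only moves tensors when one of them already lives on a GPU, so on a
# CPU-only machine both come back unchanged.
import torch

a = torch.ones(2, dtype=torch.complex64)
b = torch.ones(2, dtype=torch.float32)
a2, b2 = dtype_cast(a, b)
print(b2.dtype)               # torch.complex64 -- the real tensor was promoted
a3, b3 = device_cast(a, b)
print(a3.device, b3.device)   # both cpu when no GPU tensor is involved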
# Path: RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py
from functools import reduce
from .linear_operator_base import LinearOperator, Lazy
from .utils import dtype_cast, device_cast, get_device
import torch
class LazyKronsum(LinearOperator):
""" Lazy tensor sum. """
def __init__(self, Ms):
super().__init__()
self.Ms = Ms
shape = product([Mi.size(0) for Mi in Ms]), product([Mi.size(1) for Mi in Ms])
dtype = torch.float
device = get_device(Ms)
self.init(dtype, shape, device)
self.to(self.device)
def _matvec(self, v):
return self._matmat(v).reshape(-1)
def _matmat(self, V):
eV = V.reshape(*[Mi.size(-1) for Mi in self.Ms], -1)
out = 0*eV
for i, M in enumerate(self.Ms):
eV_front = torch.movedim(eV, i, 0)
M, eV_front = dtype_cast(M, eV_front)
MeV_front = (M@eV_front.reshape(M.size(-1), -1)).reshape(M.size(0), *eV_front.shape[1:])
out, MeV_front = dtype_cast(out, MeV_front)
out += torch.movedim(MeV_front, 0, i)
return out.reshape(self.size(0), eV.size(-1))
def _adjoint(self):
return LazyKronsum([Mi.t() for Mi in self.Ms])
def to_dense(self):
Ms = [M.to_dense() if isinstance(M, LinearOperator) else M for M in self.Ms]
return reduce(kronsum, Ms)
def __new__(cls, Ms):
if len(Ms) == 1:
return Ms[0]
return super().__new__(cls)
# could also be implemented as follows,
# but fusing the sum into a single linearOperator is faster
# def lazy_kronsum(Ms):
# n = len(Ms)
# lprod = np.cumprod([1]+[mi.size(-1) for mi in Ms])
# rprod = np.cumprod([1]+[mi.size(-1) for mi in reversed(Ms)])[::-1]
# return reduce(lambda a,b: a+b,[lazy_kron([I(lprod[i]),Mi,I(rprod[i+1])])
# for i,Mi in enumerate(Ms)])
def to(self, device):
self.Ms = [M.to(device) for M in self.Ms]
self.device = torch.empty(0).to(device).device
return self
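# Sanity-check sketch for LazyKronsum on two small square matrices. The expected
# dense operator assumes the usual Kronecker-sum convention
# A (+) B = A kron I_n + I_m kron B, which matches the axis ordering in _matmat;
# treat this as an illustration rather than a definitive spec of the class.
import torch

A = torch.randn(2, 2)
B = torch.randn(3, 3)
ks = LazyKronsum([Lazy(A), Lazy(B)])
dense = torch.kron(A, torch.eye(3)) + torch.kron(torch.eye(2), B)
v = torch.randn(6)
print(torch.allclose(ks @ v, dense @ v, atol=1e-5))  # expected: True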
class LazyJVP(LinearOperator):
""" Lazy Jacobian-vector product. """
def __init__(self, operator_fn, X, TX):
super().__init__()
self.operator_fn = operator_fn
self.X = X
self.TX = TX
self.init(torch.float, operator_fn(X).shape, X.device)
self.to(self.device)
def vjp(self, v):
""" Computes the vector-Jacobian product """
return torch.autograd.functional.jvp(
lambda x: self.operator_fn(x)@v, [self.X], [self.TX])[1]
def vjp_T(self, v):
""" Computes the vector-Jacobian product """
return torch.autograd.functional.jvp(
lambda x: self.operator_fn(x).t()@v, [self.X], [self.TX])[1]
def _matmat(self, V):
return self.vjp(V)
def _matvec(self, v):
return self.vjp(v)
def _rmatmat(self, V):
return self.vjp_T(V)
def to(self, device):
self.X = self.X.to(device)
self.TX = self.TX.to(device)
self.device = self.X.device
return self
class ConcatLazy(LinearOperator):
""" Produces a linear operator equivalent to concatenating
a collection of matrices Ms along axis=0 """
def __init__(self, Ms):
super().__init__()
self.Ms = Ms
assert all(M.size(0) == Ms[0].size(0) for M in Ms),\
f"Trying to concatenate matrices of different sizes {[M.shape for M in Ms]}"
shape = (sum(M.size(0) for M in Ms), Ms[0].size(1))
device = get_device(Ms)
self.init(None, shape, device)
self.to(self.device)
def _matmat(self, V):
return torch.cat([M@V for M in self.Ms])
def _rmatmat(self, V):
Vs = torch.chunk(V, len(self.Ms))
return sum(Mi.t()@Vi for Mi, Vi in zip(self.Ms, Vs))
def to_dense(self):
dense_Ms = [M.to_dense() if isinstance(M, LinearOperator) else M for M in self.Ms]
return torch.cat(dense_Ms)
def to(self, device):
self.Ms = [M.to(device) for M in self.Ms]
self.device = torch.empty(0).to(device).device
| return self |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: xenxxxx/BitPay-Crypto-Signal-Trading-Bot
# Path: tests/conftest.py
EXMS = 'freqtrade.exchange.exchange.Exchange'
# Path: tests/conftest.py
def create_mock_trades(fee, is_short: Optional[bool] = False, use_db: bool = True):
"""
Create some fake trades ...
:param is_short: Optional bool, None creates a mix of long and short trades.
"""
def add_trade(trade):
if use_db:
Trade.session.add(trade)
else:
LocalTrade.add_bt_trade(trade)
is_short1 = is_short if is_short is not None else True
is_short2 = is_short if is_short is not None else False
# Simulate dry_run entries
trade = mock_trade_1(fee, is_short1)
add_trade(trade)
trade = mock_trade_2(fee, is_short1)
add_trade(trade)
trade = mock_trade_3(fee, is_short2)
add_trade(trade)
trade = mock_trade_4(fee, is_short2)
add_trade(trade)
trade = mock_trade_5(fee, is_short2)
add_trade(trade)
trade = mock_trade_6(fee, is_short1)
add_trade(trade)
if use_db:
Trade.commit()
# Path: tests/conftest.py
def create_mock_trades_usdt(fee, is_short: Optional[bool] = False, use_db: bool = True):
"""
Create some fake trades ...
"""
def add_trade(trade):
if use_db:
Trade.session.add(trade)
else:
LocalTrade.add_bt_trade(trade)
is_short1 = is_short if is_short is not None else True
is_short2 = is_short if is_short is not None else False
# Simulate dry_run entries
trade = mock_trade_usdt_1(fee, is_short1)
add_trade(trade)
trade = mock_trade_usdt_2(fee, is_short1)
add_trade(trade)
trade = mock_trade_usdt_3(fee, is_short1)
add_trade(trade)
trade = mock_trade_usdt_4(fee, is_short2)
add_trade(trade)
trade = mock_trade_usdt_5(fee, is_short2)
add_trade(trade)
trade = mock_trade_usdt_6(fee, is_short1)
add_trade(trade)
trade = mock_trade_usdt_7(fee, is_short1)
add_trade(trade)
if use_db:
Trade.commit()
# Path: tests/conftest.py
def get_patched_freqtradebot(mocker, config) -> FreqtradeBot:
"""
This function patches _init_modules() to not call dependencies
:param mocker: a Mocker object to apply patches
:param config: Config to pass to the bot
:return: FreqtradeBot
"""
patch_freqtradebot(mocker, config)
return FreqtradeBot(config)
# Path: tests/conftest.py
def patch_wallet(mocker, free=999.9) -> None:
mocker.patch('freqtrade.wallets.Wallets.get_free', MagicMock(
return_value=free
))
# Path: tests/test_wallets.py
from copy import deepcopy
from unittest.mock import MagicMock
from sqlalchemy import select
from freqtrade.constants import UNLIMITED_STAKE_AMOUNT
from freqtrade.exceptions import DependencyException
from freqtrade.persistence import Trade
from tests.conftest import (EXMS, create_mock_trades, create_mock_trades_usdt,
get_patched_freqtradebot, patch_wallet)
import pytest
mocker,
default_conf,
stake_amount,
min_stake,
stake_available,
max_stake,
trade_amount,
expected,
):
freqtrade = get_patched_freqtradebot(mocker, default_conf)
mocker.patch("freqtrade.wallets.Wallets.get_available_stake_amount",
return_value=stake_available)
res = freqtrade.wallets.validate_stake_amount(
'XRP/USDT', stake_amount, min_stake, max_stake, trade_amount)
assert res == expected
@pytest.mark.parametrize('available_capital,closed_profit,open_stakes,free,expected', [
(None, 10, 100, 910, 1000),
(None, 0, 0, 2500, 2500),
(None, 500, 0, 2500, 2000),
(None, 500, 0, 2500, 2000),
(None, -70, 0, 1930, 2000),
# Only available balance matters when it's set.
(100, 0, 0, 0, 100),
(1000, 0, 2, 5, 1000),
(1235, 2250, 2, 5, 1235),
(1235, -2250, 2, 5, 1235),
])
def test_get_starting_balance(mocker, default_conf, available_capital, closed_profit,
open_stakes, free, expected):
if available_capital:
default_conf['available_capital'] = available_capital
mocker.patch("freqtrade.persistence.models.Trade.get_total_closed_profit",
return_value=closed_profit)
mocker.patch("freqtrade.persistence.models.Trade.total_open_trades_stakes",
return_value=open_stakes)
mocker.patch("freqtrade.wallets.Wallets.get_free", return_value=free)
freqtrade = get_patched_freqtradebot(mocker, default_conf)
assert freqtrade.wallets.get_starting_balance() == expected
def test_sync_wallet_futures_live(mocker, default_conf):
default_conf['dry_run'] = False
default_conf['trading_mode'] = 'futures'
default_conf['margin_mode'] = 'isolated'
mock_result = [
{
"symbol": "ETH/USDT:USDT",
"timestamp": None,
"datetime": None,
"initialMargin": 0.0,
"initialMarginPercentage": None,
"maintenanceMargin": 0.0,
"maintenanceMarginPercentage": 0.005,
"entryPrice": 0.0,
"notional": 100.0,
"leverage": 5.0,
"unrealizedPnl": 0.0,
"contracts": 100.0,
"contractSize": 1,
"marginRatio": None,
"liquidationPrice": 0.0,
"markPrice": 2896.41,
"collateral": 20,
"marginType": "isolated",
"side": 'short',
"percentage": None
},
{
"symbol": "ADA/USDT:USDT",
"timestamp": None,
"datetime": None,
"initialMargin": 0.0,
"initialMarginPercentage": None,
"maintenanceMargin": 0.0,
"maintenanceMarginPercentage": 0.005,
"entryPrice": 0.0,
"notional": 100.0,
"leverage": 5.0,
"unrealizedPnl": 0.0,
"contracts": 100.0,
"contractSize": 1,
"marginRatio": None,
"liquidationPrice": 0.0,
"markPrice": 0.91,
"collateral": 20,
"marginType": "isolated",
"side": 'short',
"percentage": None
},
{
# Closed position
"symbol": "SOL/BUSD:BUSD",
"timestamp": None,
"datetime": None,
"initialMargin": 0.0,
"initialMarginPercentage": None,
"maintenanceMargin": 0.0,
"maintenanceMarginPercentage": 0.005,
"entryPrice": 0.0,
"notional": 0.0,
"leverage": 5.0,
"unrealizedPnl": 0.0,
"contracts": 0.0,
"contractSize": 1,
"marginRatio": None,
"liquidationPrice": 0.0,
"markPrice": 15.41,
"collateral": 0.0,
"marginType": "isolated",
"side": 'short',
"percentage": None
}
]
mocker.patch.multiple(
EXMS,
| get_balances=MagicMock(return_value={ |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: awslabs/optimizing-multitask-training-through-dynamic-pipelines
# Path: dynapipe/model.py
class DynaPipeMicrobatch:
# This class is used to represent a microbatch for DynaPipe, which can be
# converted to/from a model spec json file. It is used to supply
# arguments to the micro-batch generator and scheduler.
def __init__(self, name) -> None:
self.name = name
# in DynaPipeModel, "layer" refers to an actual layer in the model
self.n_layers = None
self.fw_exec_times = []
self.bw_exec_times = []
self.fw_comm_size = []
self.bw_comm_size = []
self.model_state_memory = []
self.model_stored_activation_memory = []
self.model_peak_activation_memory = []
self.activation_shapes = []
def _check_or_set_nlayers(self, n_layers, debug_name, minus_one=False):
expected_value = self.n_layers if not minus_one else self.n_layers - 1
if self.n_layers is not None:
assert (
n_layers == expected_value
), """{} must have length n_layers {} ({}),
but got length {}""".format(
debug_name,
"- 1" if minus_one else "",
expected_value,
n_layers,
)
else:
self.n_layers = n_layers
def set_fw_exec_times(self, fw_exec_times: List[float]) -> None:
# time is in us (microseconds)
self._check_or_set_nlayers(len(fw_exec_times), "fw_exec_times")
self.fw_exec_times = fw_exec_times
def set_bw_exec_times(self, bw_exec_times: List[float]) -> None:
# time is in us (microseconds)
self._check_or_set_nlayers(len(bw_exec_times), "bw_exec_times")
self.bw_exec_times = bw_exec_times
def set_fw_comm_size(self, fw_comm_size: List[float]) -> None:
# size is in mega bytes (MB)
self._check_or_set_nlayers(
len(fw_comm_size), "fw_comm_size", minus_one=True
)
self.fw_comm_size = fw_comm_size
def set_bw_comm_size(self, bw_comm_size: List[float]) -> None:
# size is in mega bytes (MB)
self._check_or_set_nlayers(
len(bw_comm_size), "bw_comm_size", minus_one=True
)
self.bw_comm_size = bw_comm_size
def set_model_state_memory(self, model_state_memory: List[float]) -> None:
# size is in MB (megabytes)
self._check_or_set_nlayers(
len(model_state_memory), "model_state_memory"
)
self.model_state_memory = model_state_memory
def set_model_stored_activation_memory(
self, model_stored_activation_memory: List[float]
) -> None:
# size is in MB (megabytes)
self._check_or_set_nlayers(
len(model_stored_activation_memory),
"model_stored_activation_memory",
)
self.model_stored_activation_memory = model_stored_activation_memory
def set_model_peak_activation_memory(
self, model_peak_activation_memory: List[float]
) -> None:
# size is in MB (megabytes)
self._check_or_set_nlayers(
len(model_peak_activation_memory), "model_peak_activation_memory"
)
self.model_peak_activation_memory = model_peak_activation_memory
def set_activation_shapes(
self, activation_shapes: List[List[Tuple[int, int, int]]]
) -> None:
# activation_shapes: outer list: layer, inner list: output activations
# Note that for decoders, the activation should be the
# output of encoder + decoder, since encoder output is needed for
# all decoder layers.
self._check_or_set_nlayers(len(activation_shapes), "activation_shapes")
# make shapes immutable
activation_shapes = [tuple(x) for x in activation_shapes]
self.activation_shapes = activation_shapes
def check_all_set(self):
assert self.n_layers is not None
assert len(self.fw_exec_times) == self.n_layers
assert len(self.bw_exec_times) == self.n_layers
assert len(self.fw_comm_size) == self.n_layers - 1
assert len(self.bw_comm_size) == self.n_layers - 1
assert len(self.model_state_memory) == self.n_layers
assert len(self.model_stored_activation_memory) == self.n_layers
assert len(self.model_peak_activation_memory) == self.n_layers
assert len(self.activation_shapes) == self.n_layers
def to_json(self) -> dict:
return {
"name": self.name,
"n_layers": self.n_layers,
"fw_exec_times": self.fw_exec_times,
"bw_exec_times": self.bw_exec_times,
"fw_comm_size": self.fw_comm_size,
"bw_comm_size": self.bw_comm_size,
"model_state_memory": self.model_state_memory,
"model_stored_activation_memory": self.model_stored_activation_memory, # noqa: E501
"model_peak_activation_memory": self.model_peak_activation_memory,
"activation_shapes": self.activation_shapes,
}
@staticmethod
def from_json(json_dict):
microbatch = DynaPipeMicrobatch(json_dict["name"])
microbatch.set_fw_exec_times(json_dict["fw_exec_times"])
microbatch.set_bw_exec_times(json_dict["bw_exec_times"])
microbatch.set_fw_comm_size(json_dict["fw_comm_size"])
microbatch.set_bw_comm_size(json_dict["bw_comm_size"])
microbatch.set_model_state_memory(json_dict["model_state_memory"])
microbatch.set_model_stored_activation_memory(
json_dict["model_stored_activation_memory"]
)
microbatch.set_model_peak_activation_memory(
json_dict["model_peak_activation_memory"]
)
microbatch.set_activation_shapes(json_dict["activation_shapes"])
return microbatch
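# Tiny usage sketch for DynaPipeMicrobatch with a hypothetical 2-layer model; all
# numbers are placeholders chosen only to satisfy the length checks above
# (per-layer lists of length n_layers, comm sizes of length n_layers - 1).
mb = DynaPipeMicrobatch("example")
mb.set_fw_exec_times([100.0, 120.0])            # us per layer
mb.set_bw_exec_times([200.0, 240.0])            # us per layer
mb.set_fw_comm_size([8.0])                      # MB, one boundary between 2 layers
mb.set_bw_comm_size([8.0])
mb.set_model_state_memory([50.0, 50.0])         # MB
mb.set_model_stored_activation_memory([10.0, 10.0])
mb.set_model_peak_activation_memory([20.0, 20.0])
mb.set_activation_shapes([[(1, 128, 512)], [(1, 128, 512)]])
mb.check_all_set()                              # passes: every field has n_layers entries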
# Path: dynapipe/model.py
class DynaPipeMinibatch:
# This class represents a list of microbatches (a minibatch)
def __init__(
self, name: str, microbatches: List[DynaPipeMicrobatch] = None
) -> None:
self.name = name
self.microbatches = microbatches if microbatches else []
self.n_layers = None if not microbatches else microbatches[0].n_layers
def add_microbatch(self, microbatch: DynaPipeMicrobatch) -> None:
if self.n_layers is None:
self.n_layers = microbatch.n_layers
else:
assert (
self.n_layers == microbatch.n_layers
), "All microbatches must have the same number of layers"
self.microbatches.append(microbatch)
def __str__(self):
return (
"("
+ self.name
+ ", "
+ str(len(self.microbatches))
+ " microbatches)"
)
@staticmethod
def from_json(json_dict):
minibatch = DynaPipeMinibatch(json_dict["name"])
json_list = json_dict["microbatches"]
for json_dict in json_list:
microbatch = DynaPipeMicrobatch.from_json(json_dict)
minibatch.add_microbatch(microbatch)
return minibatch
def to_json(self) -> dict:
return {
"name": self.name,
"microbatches": [
microbatch.to_json() for microbatch in self.microbatches
],
}
def permute_microbatches(self, permutation: List[int]) -> None:
assert len(permutation) == len(self.microbatches)
permuted_microbatches = [self.microbatches[i] for i in permutation]
return DynaPipeMinibatch(self.name, permuted_microbatches)
# Path: dynapipe/model.py
def get_uniform_cluster(n_devices, intra_node_bw=4800, inter_node_bw=100):
device2node = {i: i for i in range(n_devices)}
memory_limits = [1000000] * n_devices
cluster = DynaPipeCluster(
device2node, memory_limits, intra_node_bw, inter_node_bw, 0, 0
)
return cluster
# Path: dynapipe/schedule_opt/execution_planner.py
def optimize_schedule(
sch_type: str,
opt_minibatch: DynaPipeMinibatch,
opt_cluster: DynaPipeCluster,
device_assignment: List[int],
try_permutations=True,
perm_clusters=None,
perm_cluster_algo="kmeans",
include_memory_stats=False,
progress_bar=False,
memory_limit=float("inf"),
disable_scheduler_memory_limit=False,
max_otf_microbatches=None,
raise_on_oom=True,
rc_type: Optional[str] = None,
logger: Optional[logging.Logger] = None,
):
if try_permutations:
if perm_clusters is None:
if len(opt_minibatch.microbatches) > 20:
perm_clusters = 3
else:
perm_clusters = 4
if len(opt_minibatch.microbatches) > perm_clusters:
mb_vectors = []
for mb in opt_minibatch.microbatches:
# use fw and bw time as features
mb_vectors.append(
[
mb.fw_exec_times[0],
mb.fw_exec_times[-1],
mb.bw_exec_times[0],
mb.bw_exec_times[-1],
]
)
mb_vectors = np.array(mb_vectors)
if perm_cluster_algo == "kmeans":
cluster = KMeans(
perm_clusters,
random_state=0,
n_init="auto",
).fit(mb_vectors)
elif perm_cluster_algo == "agglomerative":
cluster = AgglomerativeClustering(
perm_clusters,
linkage="complete",
).fit(mb_vectors)
mb_labels = list(cluster.labels_)
n_clusters = max(mb_labels) + 1
assert n_clusters <= perm_clusters
mb_groups = [[] for _ in range(n_clusters)]
mb_idx2group = {}
for i, label in enumerate(mb_labels):
mb_groups[label].append(i)
mb_idx2group[i] = label
result_premutations = []
for perm in itertools.permutations(range(len(mb_groups))):
# generate a random permutation for each group
mb_random_perm_per_label = {}
for label, mb_indices in enumerate(mb_groups):
shuffled_indices = np.random.permutation(mb_indices)
mb_random_perm_per_label[label] = list(shuffled_indices)
reconstructed_perm = []
for label in perm:
reconstructed_perm.extend(mb_random_perm_per_label[label])
result_premutations.append(reconstructed_perm)
permutations = result_premutations
else:
permutations = list(
itertools.permutations(range(len(opt_minibatch.microbatches)))
)
else:
permutations = []
# always try the original order
permutations.append(list(range(len(opt_minibatch.microbatches))))
def _run_schedules(scheduler_memory_limit):
max_makespan = 0.0
max_stats = None
max_instructions = []
min_makespan = float("inf")
min_stats = None
min_instructions = []
if progress_bar:
from tqdm import tqdm
iterator = tqdm(permutations)
else:
iterator = permutations
debug_json = None
mem_for_perms = []
for perm in iterator:
permuted_minibatch = opt_minibatch.permute_microbatches(perm)
# get simulator
simulator = get_simulator(
sch_type,
permuted_minibatch,
opt_cluster,
device_assignment,
include_memory_stats=include_memory_stats,
memory_limit=scheduler_memory_limit,
max_otf_microbatches=max_otf_microbatches,
logger=logger,
)
timeline_json = simulator.schedule()
instructions = simulator.get_instructions()
peak_memory = simulator.get_executor_peak_memory()
max_memory_device = -1
max_device_memory = -1
for device, memory in peak_memory.items():
if memory > max_device_memory:
max_memory_device = device
max_device_memory = memory
makespan = simulator.get_makespan()
if makespan is None:
continue
makespan = makespan / 1000.0
debug_json = timeline_json
mem_for_perms.append(max_device_memory)
if max_device_memory > memory_limit:
continue
if makespan > max_makespan:
max_makespan = makespan
max_stats = (
perm,
max_device_memory,
max_memory_device,
timeline_json,
)
max_instructions = instructions
if makespan < min_makespan:
min_makespan = makespan
min_stats = (
perm,
max_device_memory,
max_memory_device,
timeline_json,
)
min_instructions = instructions
if logger is not None and max_makespan > 0.0:
logger.debug(
"Sched mem limit: {}, RC type: {}, Schedule type: {}, "
"min peak memory: {} MB, makespan: {}.".format(
scheduler_memory_limit,
rc_type,
sch_type,
min(mem_for_perms),
min_makespan,
)
)
return (
max_makespan,
max_stats,
max_instructions,
min_makespan,
min_stats,
min_instructions,
debug_json,
mem_for_perms,
)
# first try without setting memory limit on scheduler
# (i.e. see if there exist a feasible permutation)
(
max_makespan,
max_stats,
max_instructions,
min_makespan,
min_stats,
min_instructions,
debug_json,
mem_for_perms,
) = _run_schedules(float("inf"))
if (
max_makespan == 0.0
and sch_type == "wait-free-cyclic"
and not disable_scheduler_memory_limit
):
# try with scheduler memory limit
if logger is not None:
logger.debug("Trying with scheduler memory limit.")
(
max_makespan,
max_stats,
max_instructions,
min_makespan,
min_stats,
min_instructions,
debug_json,
mem_for_perms,
) = _run_schedules(memory_limit)
if max_makespan == 0.0 and raise_on_oom:
# with open("./test_memory.json", "w") as f:
# json.dump(debug_json, f)
raise RuntimeError(
"No feasible schedule within memory limit found. "
"Memory consumption for different permutations: "
"min: {}, max: {}.".format(
[] if not mem_for_perms else min(mem_for_perms),
[] if not mem_for_perms else max(mem_for_perms),
)
)
return (
max_makespan,
max_stats,
max_instructions,
min_makespan,
min_stats,
min_instructions,
)
# Path: scripts/simulation/schedule_under_dynamic_mb.py
import os
import numpy as np
from shift_trace_json import (
construct_exec_time_dict,
convert_to_multistream_comm,
)
from tqdm import tqdm
from dynapipe.model import (
DynaPipeMicrobatch,
DynaPipeMinibatch,
get_uniform_cluster,
)
from dynapipe.schedule_opt.execution_planner import optimize_schedule
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
def get_hetero_minibatch(
microbatch_multiplier, comm_factor=1
) -> DynaPipeMinibatch:
fw_times = [4000] * 16
memory_multiplier = microbatch_multiplier
microbatches = []
for i in range(len(microbatch_multiplier)):
current_fw_times = [
fw_times[j] * microbatch_multiplier[i]
for j in range(len(fw_times))
]
current_bw_times = [2 * t for t in current_fw_times]
microbatch = DynaPipeMicrobatch(str(i))
microbatch.set_fw_exec_times(current_fw_times)
microbatch.set_bw_exec_times(current_bw_times)
microbatch.set_fw_comm_size(
[200 * comm_factor * microbatch_multiplier[i]]
* (len(fw_times) - 1)
)
microbatch.set_bw_comm_size(
[200 * comm_factor * microbatch_multiplier[i]]
* (len(fw_times) - 1)
)
microbatch.set_model_state_memory([4000] * len(fw_times))
microbatch.set_model_stored_activation_memory(
[8000 * memory_multiplier[i]] * len(fw_times)
)
microbatch.set_model_peak_activation_memory(
[16000 * memory_multiplier[i]] * len(fw_times)
)
microbatch.set_activation_shapes(
[[(64, 128, 512)]] * (len(fw_times) // 2)
+ [[(64, 128, 512), (64, 128, 512)]] * (len(fw_times) // 2)
)
microbatches.append(microbatch)
minibatch = DynaPipeMinibatch("test", microbatches)
return minibatch
def gen_micro_batch_multipliers(n_iters, n_microbatches, std):
rng = np.random.default_rng(seed=48)
for _ in range(n_iters):
m = np.clip(rng.normal(1, std, size=n_microbatches), 0.1, 10)
normalized_m = m / (sum(m) / n_microbatches)
yield normalized_m
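# Quick illustration of the multiplier generator above: each draw is clipped to
# [0.1, 10] and renormalized so the mean multiplier over the minibatch is 1.
for m in gen_micro_batch_multipliers(n_iters=2, n_microbatches=4, std=0.1):
    print(np.round(m, 3), "mean =", round(float(m.mean()), 3))  # mean ~= 1.0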
def schedule_minibatch(
n_stages,
sch_type,
n_iters,
n_microbatches=16,
std=0.1,
multistream=False,
comm_factor=1,
):
nlayers = 16
assert nlayers % n_stages == 0
| layers_per_stage = nlayers // n_stages |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ApolloAuto/apollo-model-yolox
# Path: yolox/data/dataloading.py
def get_yolox_datadir():
"""
get dataset dir of YOLOX. If environment variable named `YOLOX_DATADIR` is set,
this function will return value of the environment variable. Otherwise, use data
"""
yolox_datadir = os.getenv("YOLOX_DATADIR", None)
if yolox_datadir is None:
import yolox
yolox_path = os.path.dirname(os.path.dirname(yolox.__file__))
yolox_datadir = os.path.join(yolox_path, "datasets")
return yolox_datadir
# Path: yolox/exp/yolox_base.py
class Exp(BaseExp):
def __init__(self):
super().__init__()
# ---------------- model config ---------------- #
# detect classes number of model
self.num_classes = 8
# factor of model depth
self.depth = 1.00
# factor of model width
self.width = 1.00
# activation name. For example, if using "relu", then "silu" will be replaced to "relu".
self.act = "silu"
# ---------------- dataloader config ---------------- #
# set worker to 4 for shorter dataloader init time
# If your training process cost many memory, reduce this value.
self.data_num_workers = 4
self.input_size = (640, 640) # (height, width)
# Actual multiscale ranges: [640 - 5 * 32, 640 + 5 * 32].
# To disable multiscale training, set the value to 0.
self.multiscale_range = 5
# You can uncomment this line to specify a multiscale range
# self.random_size = (14, 26)
# dir of dataset images, if data_dir is None, this project will use `datasets` dir
self.data_dir = None
# name of annotation file for training
self.train_ann = "instances_train2017.json"
# name of annotation file for evaluation
self.val_ann = "instances_val2017.json"
# name of annotation file for testing
self.test_ann = "instances_test2017.json"
# --------------- transform config ----------------- #
# prob of applying mosaic aug
self.mosaic_prob = 1.0
# prob of applying mixup aug
self.mixup_prob = 1.0
# prob of applying hsv aug
self.hsv_prob = 1.0
# prob of applying flip aug
self.flip_prob = 0.5
# rotation angle range, for example, if set to 2, the true range is (-2, 2)
self.degrees = 10.0
# translate range, for example, if set to 0.1, the true range is (-0.1, 0.1)
self.translate = 0.1
self.mosaic_scale = (0.1, 2)
# apply mixup aug or not
self.enable_mixup = True
self.mixup_scale = (0.5, 1.5)
# shear angle range, for example, if set to 2, the true range is (-2, 2)
self.shear = 2.0
# -------------- training config --------------------- #
# epoch number used for warmup
self.warmup_epochs = 5
# max training epoch
self.max_epoch = 300
# minimum learning rate during warmup
self.warmup_lr = 0
self.min_lr_ratio = 0.05
# learning rate for one image. During training, lr will multiply batchsize.
self.basic_lr_per_img = 0.01 / 64.0
# name of LRScheduler
self.scheduler = "yoloxwarmcos"
# last #epoch to close augmention like mosaic
self.no_aug_epochs = 15
# apply EMA during training
self.ema = True
# weight decay of optimizer
self.weight_decay = 5e-4
# momentum of optimizer
self.momentum = 0.9
# log period in iter, for example,
# if set to 1, user could see log every iteration.
self.print_interval = 10
# eval period in epoch, for example,
# if set to 1, model will be evaluate after every epoch.
self.eval_interval = 10
# save history checkpoint or not.
# If set to False, yolox will only save latest and best ckpt.
self.save_history_ckpt = True
# name of experiment
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
# ----------------- testing config ------------------ #
# output image size during evaluation/test
self.test_size = (640, 640)
# confidence threshold during evaluation/test,
# boxes whose scores are less than test_conf will be filtered
self.test_conf = 0.01
# nms threshold
self.nmsthre = 0.65
def get_model(self):
from yolox.models import YOLOX, YOLOPAFPN, YOLOXHead
def init_yolo(M):
for m in M.modules():
if isinstance(m, nn.BatchNorm2d):
m.eps = 1e-3
m.momentum = 0.03
if getattr(self, "model", None) is None:
in_channels = [256, 512, 1024]
backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels, act=self.act)
head = YOLOXHead(self.num_classes, self.width, in_channels=in_channels, act=self.act)
self.model = YOLOX(backbone, head)
self.model.apply(init_yolo)
self.model.head.initialize_biases(1e-2)
self.model.train()
return self.model
def get_dataset(self, cache: bool = False, cache_type: str = "ram"):
"""
Get dataset according to cache and cache_type parameters.
Args:
cache (bool): Whether to cache imgs to ram or disk.
cache_type (str, optional): Defaults to "ram".
"ram" : Caching imgs to ram for fast training.
"disk": Caching imgs to disk for fast training.
"""
from yolox.data import COCODataset, TrainTransform
return COCODataset(
data_dir=self.data_dir,
json_file=self.train_ann,
img_size=self.input_size,
preproc=TrainTransform(
max_labels=50,
flip_prob=self.flip_prob,
hsv_prob=self.hsv_prob
),
cache=cache,
cache_type=cache_type,
)
def get_data_loader(self, batch_size, is_distributed, no_aug=False, cache_img: str = None):
"""
Get dataloader according to cache_img parameter.
Args:
no_aug (bool, optional): Whether to turn off mosaic data enhancement. Defaults to False.
cache_img (str, optional): cache_img is equivalent to cache_type. Defaults to None.
"ram" : Caching imgs to ram for fast training.
"disk": Caching imgs to disk for fast training.
None: Do not use cache, in this case cache_data is also None.
"""
from yolox.data import (
TrainTransform,
YoloBatchSampler,
DataLoader,
InfiniteSampler,
MosaicDetection,
worker_init_reset_seed,
)
from yolox.utils import wait_for_the_master
# if cache is True, we will create self.dataset before launch
# else we will create self.dataset after launch
if self.dataset is None:
with wait_for_the_master():
assert cache_img is None, \
"cache_img must be None if you didn't create self.dataset before launch"
self.dataset = self.get_dataset(cache=False, cache_type=cache_img)
self.dataset = MosaicDetection(
dataset=self.dataset,
mosaic=not no_aug,
img_size=self.input_size,
preproc=TrainTransform(
max_labels=120,
flip_prob=self.flip_prob,
hsv_prob=self.hsv_prob),
degrees=self.degrees,
translate=self.translate,
mosaic_scale=self.mosaic_scale,
mixup_scale=self.mixup_scale,
shear=self.shear,
enable_mixup=self.enable_mixup,
mosaic_prob=self.mosaic_prob,
mixup_prob=self.mixup_prob,
)
if is_distributed:
batch_size = batch_size // dist.get_world_size()
sampler = InfiniteSampler(len(self.dataset), seed=self.seed if self.seed else 0)
batch_sampler = YoloBatchSampler(
sampler=sampler,
batch_size=batch_size,
drop_last=False,
mosaic=not no_aug,
)
dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
dataloader_kwargs["batch_sampler"] = batch_sampler
# Make sure each process has different random seed, especially for 'fork' method.
# Check https://github.com/pytorch/pytorch/issues/63311 for more details.
dataloader_kwargs["worker_init_fn"] = worker_init_reset_seed
train_loader = DataLoader(self.dataset, **dataloader_kwargs)
return train_loader
def random_resize(self, data_loader, epoch, rank, is_distributed):
tensor = torch.LongTensor(2).cuda()
if rank == 0:
size_factor = self.input_size[1] * 1.0 / self.input_size[0]
if not hasattr(self, 'random_size'):
min_size = int(self.input_size[0] / 32) - self.multiscale_range
max_size = int(self.input_size[0] / 32) + self.multiscale_range
self.random_size = (min_size, max_size)
size = random.randint(*self.random_size)
size = (int(32 * size), 32 * int(size * size_factor))
tensor[0] = size[0]
tensor[1] = size[1]
if is_distributed:
dist.barrier()
dist.broadcast(tensor, 0)
input_size = (tensor[0].item(), tensor[1].item())
return input_size
def preprocess(self, inputs, targets, tsize):
scale_y = tsize[0] / self.input_size[0]
scale_x = tsize[1] / self.input_size[1]
if scale_x != 1 or scale_y != 1:
inputs = nn.functional.interpolate(
inputs, size=tsize, mode="bilinear", align_corners=False
)
targets[..., 1::2] = targets[..., 1::2] * scale_x
targets[..., 2::2] = targets[..., 2::2] * scale_y
return inputs, targets
def get_optimizer(self, batch_size):
if "optimizer" not in self.__dict__:
if self.warmup_epochs > 0:
lr = self.warmup_lr
else:
lr = self.basic_lr_per_img * batch_size
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in self.model.named_modules():
if hasattr(v, "bias") and isinstance(v.bias, nn.Parameter):
pg2.append(v.bias) # biases
if isinstance(v, nn.BatchNorm2d) or "bn" in k:
pg0.append(v.weight) # no decay
elif hasattr(v, "weight") and isinstance(v.weight, nn.Parameter):
pg1.append(v.weight) # apply decay
optimizer = torch.optim.SGD(
pg0, lr=lr, momentum=self.momentum, nesterov=True
)
optimizer.add_param_group(
{"params": pg1, "weight_decay": self.weight_decay}
) # add pg1 with weight_decay
optimizer.add_param_group({"params": pg2})
self.optimizer = optimizer
return self.optimizer
def get_lr_scheduler(self, lr, iters_per_epoch):
from yolox.utils import LRScheduler
scheduler = LRScheduler(
self.scheduler,
lr,
iters_per_epoch,
self.max_epoch,
warmup_epochs=self.warmup_epochs,
warmup_lr_start=self.warmup_lr,
no_aug_epochs=self.no_aug_epochs,
min_lr_ratio=self.min_lr_ratio,
)
return scheduler
def get_eval_dataset(self, **kwargs):
from yolox.data import COCODataset, ValTransform
testdev = kwargs.get("testdev", False)
legacy = kwargs.get("legacy", False)
return COCODataset(
data_dir=self.data_dir,
json_file=self.val_ann if not testdev else self.test_ann,
name="val2017" if not testdev else "test2017",
img_size=self.test_size,
preproc=ValTransform(legacy=legacy),
)
def get_eval_loader(self, batch_size, is_distributed, **kwargs):
valdataset = self.get_eval_dataset(**kwargs)
if is_distributed:
batch_size = batch_size // dist.get_world_size()
sampler = torch.utils.data.distributed.DistributedSampler(
valdataset, shuffle=False
)
else:
sampler = torch.utils.data.SequentialSampler(valdataset)
dataloader_kwargs = {
"num_workers": self.data_num_workers,
"pin_memory": True,
"sampler": sampler,
}
dataloader_kwargs["batch_size"] = batch_size
val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)
return val_loader
def get_evaluator(self, batch_size, is_distributed, testdev=False, legacy=False):
from yolox.evaluators import COCOEvaluator
return COCOEvaluator(
dataloader=self.get_eval_loader(batch_size, is_distributed,
testdev=testdev, legacy=legacy),
img_size=self.test_size,
confthre=self.test_conf,
nmsthre=self.nmsthre,
num_classes=self.num_classes,
testdev=testdev,
)
def get_trainer(self, args):
from yolox.core import Trainer
trainer = Trainer(self, args)
# NOTE: trainer shouldn't be an attribute of exp object
return trainer
def eval(self, model, evaluator, is_distributed, half=False, return_outputs=False):
return evaluator.evaluate(model, is_distributed, half, return_outputs=return_outputs)
# Path: exps/example/yolox_voc/yolox_voc_m.py
import os
from yolox.data import get_yolox_datadir
from yolox.exp import Exp as MyExp
from yolox.data import VOCDetection, TrainTransform
from yolox.data import VOCDetection, ValTransform
from yolox.evaluators import VOCEvaluator
# encoding: utf-8
class Exp(MyExp):
def __init__(self):
super(Exp, self).__init__()
self.num_classes = 8 # TODO: KITTI class is 6
self.depth = 0.67
self.width = 0.75
self.warmup_epochs = 1
# ---------- transform config ------------ #
self.mosaic_prob = 1.0
self.mixup_prob = 1.0
self.flip_prob = 0.5
self.hsv_prob = 1.0
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
def get_dataset(self, cache: bool, cache_type: str = "disk"):
return VOCDetection(
data_dir=os.path.join(get_yolox_datadir(), "CUSTOMER"), # TODO: CUSTOMER to KITTI
image_sets=[('train')],
img_size=self.input_size,
preproc=TrainTransform(
max_labels=50,
flip_prob=self.flip_prob,
hsv_prob=self.hsv_prob),
# cache=True,
# cache_type="disk",
)
def get_eval_dataset(self, **kwargs):
legacy = kwargs.get("legacy", False)
return VOCDetection(
data_dir=os.path.join(get_yolox_datadir(), "CUSTOMER"), # TODO: CUSTOMER to KITTI
image_sets=[('test')],
img_size=self.test_size,
preproc=ValTransform(legacy=legacy),
)
| def get_evaluator(self, batch_size, is_distributed, testdev=False, legacy=False): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ndiamant/spice
# Path: spice/spice_n2.py
def batch_f_bar(
x: torch.Tensor,
knot_left: torch.Tensor, knot_right: torch.Tensor,
a: torch.Tensor, b: torch.Tensor, c: torch.Tensor,
min_val: float = 0.0,
) -> torch.Tensor:
"""
x: inputs to polynomial: bsz x d
knot_left: bsz x (K - 1)
knot_right: bsz x (K - 1)
a: bsz x K - 1
b: bsz x K - 1
c: bsz x K - 1
where K is the number of knots.
return: bsz x d
"""
assert x.ndim == 2
assert x.shape[0] == knot_left.shape[0]
assert len({knot_left.shape, knot_right.shape, a.shape, b.shape, c.shape}) == 1
# find which coefficients to use with which inputs
which_bin_mask = (
(x.unsqueeze(-1) >= knot_left.unsqueeze(1))
& (x.unsqueeze(-1) < knot_right.unsqueeze(1))
) # bsz x d x (K - 1)
a = torch.masked_select(a.unsqueeze(1), which_bin_mask).reshape(x.shape)
b = torch.masked_select(b.unsqueeze(1), which_bin_mask).reshape(x.shape)
c = torch.masked_select(c.unsqueeze(1), which_bin_mask).reshape(x.shape)
# evaluate the function
return f_bar(x, a, b, c, min_val)
# Path: spice/spice_n2.py
def batch_f_bar_integral(
knot_left: torch.Tensor, knot_right: torch.Tensor,
a: torch.Tensor, b: torch.Tensor, c: torch.Tensor,
f_bar_min_val: float = 0.0,
):
"""
knot_left: bsz x (K - 1)
knot_right: bsz x (K - 1)
a: bsz x (K - 1)
b: bsz x (K - 1)
c: bsz x (K - 1)
where K is the number of knots.
return: bsz x 1
"""
assert knot_left.ndim == 2
assert len({knot_left.shape, knot_right.shape, a.shape, b.shape, c.shape}) == 1
integrals = f_bar_integral(knot_left, knot_right, a, b, c, f_bar_min_val).clip(1e-3)
return integrals.sum(dim=1, keepdim=True)
# Path: spice/spice_n2.py
def f_bar(x: torch.Tensor, a: torch.Tensor, b: torch.Tensor, c: torch.Tensor, min_val: float = 0.0) -> torch.Tensor:
return torch.clip(a * x ** 2 + b * x + c, min_val)
# Path: spice/spice_n2.py
def f_bar_integral(
x0: torch.Tensor, x1: torch.Tensor, a: torch.Tensor, b: torch.Tensor, c: torch.Tensor,
f_bar_min_value: float = 0,
) -> torch.Tensor:
# calc integral as if just a quadratic
above_zero_int = quad_integral(x0, x1, a, b, c)
# subtract the part below zero, which is thresholded
discriminant = b ** 2 - 4 * a * (c - f_bar_min_value)
root_denom = torch.clip((2 * a).abs(), 1e-10) * (torch.sign(a) + 1e-10)
root_1 = (-b - torch.sqrt(discriminant.clip(1e-10))) / root_denom
root_2 = (-b + torch.sqrt(discriminant.clip(1e-10))) / root_denom
smaller_root = torch.minimum(root_1, root_2)
bigger_root = torch.maximum(root_1, root_2)
# find the intersection of (x0, x1) ^ (r1, r1)
smaller_root = torch.minimum(torch.maximum(smaller_root, x0), x1)
bigger_root = torch.maximum(torch.minimum(bigger_root, x1), x0)
between_roots_int = quad_integral(smaller_root, bigger_root, a, b, c)
# set the integral to zero if there are no roots or if the parabola's peak is above 0
parab_max = c - f_bar_min_value - b ** 2 / (4 * a)
ignore_roots_integral = (discriminant <= 0) | (parab_max > 0) | (root_1.isnan()) | (root_2.isnan())
between_roots_int = torch.where(ignore_roots_integral, 0, between_roots_int)
# account for f_bar_min_value
min_val_integral = (bigger_root - smaller_root) * f_bar_min_value
min_val_integral = torch.where(ignore_roots_integral, 0, min_val_integral)
# return the result
return above_zero_int - between_roots_int + min_val_integral
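# Standalone numeric sanity check of the thresholded-quadratic integral idea used
# by f_bar_integral above (it does not call the repo code; it only verifies the
# arithmetic on one example). For x^2 - 1 clipped at 0 on [0, 2], the exact
# integral is the integral of (x^2 - 1) from 1 to 2, i.e. 4/3.
import torch
xs = torch.linspace(0.0, 2.0, 100001)
clipped = torch.clip(xs ** 2 - 1.0, 0.0)
print(float(torch.trapezoid(clipped, xs)), 4.0 / 3.0)  # ~= 1.3333 vs 1.3333...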
# Path: spice/spice_n2.py
class ConditionalQuadratic(nn.Module):
def __init__(
self, condition_dim: int, n_knots: int,
learn_bin_widths: bool = False,
min_f_bar_val: float = 1e-2, # This is the minimum likelihood the model can output
bin_width_init: torch.Tensor = None,
bin_height_init: torch.Tensor = None,
):
"""
bin_width_init: n_knots - 1
bin_height_init: n_knots - 1
"""
super().__init__()
self.n_knots = n_knots
self.learn_bin_widths = learn_bin_widths
self.min_f_bar_val = min_f_bar_val
self.get_widths = nn.Sequential(
nn.GELU(), nn.Linear(condition_dim, n_knots - 1),
) if learn_bin_widths else None
self.get_heights = nn.Sequential(
nn.GELU(), nn.Linear(condition_dim, n_knots * 2 - 1),
)
self._init_bins(bin_width_init, bin_height_init)
@torch.no_grad()
def _init_bins(self, width_init: torch.Tensor, height_init: torch.Tensor):
# handle height initialization
if height_init is None:
height_init = torch.ones(self.n_knots * 2 - 1)
height_init[:self.n_knots] = softplus_inverse(torch.ones(self.n_knots))
height_init += torch.randn_like(height_init) * 1e-2
else:
smart_height = torch.zeros(2 * self.n_knots - 1)
smart_height[:self.n_knots - 1] = softplus_inverse(height_init)
smart_height[1: self.n_knots] = softplus_inverse(height_init)
smart_height[self.n_knots:] = height_init
height_init = smart_height
self.get_heights[-1].bias = nn.Parameter(height_init)
self.get_heights[-1].weight = nn.Parameter(
torch.randn_like(self.get_heights[-1].weight) / 10,
)
if width_init is None:
width_init = torch.full((self.n_knots - 1,), 1 / (self.n_knots - 1))
if self.learn_bin_widths:
self.get_widths[-1].bias = torch.nn.Parameter(width_init.log())
self.get_widths[-1].weight = nn.Parameter(
torch.randn_like(self.get_widths[-1].weight) / 10,
)
else:
cum_width = width_init.cumsum(dim=0)
self.register_buffer("x1", F.pad(cum_width, (1, 0))[:-1])
self.register_buffer("x3", cum_width)
self.register_buffer("x2", (self.x1 + self.x3) / 2)
def get_x123(self, z: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
if not self.learn_bin_widths:
bsz = z.shape[0]
x1 = self.x1.repeat(bsz, 1)
x2 = self.x2.repeat(bsz, 1)
x3 = self.x3.repeat(bsz, 1)
return x1, x2, x3
# widths -> x positions
w = self.get_widths(z)
# make sure there's a smallest bin width
min_bin_width = 1 / (self.n_knots * 10)
w = F.softmax(w, dim=-1)
w = min_bin_width + (1 - min_bin_width * self.n_knots) * w
x = w.cumsum(dim=-1)
x = torch.cat([torch.zeros_like(x[:, :1]), x], dim=-1)
x[:, -1] = x[:, -1] + min_bin_width
x = x.clip(0, 1)
x1 = x[:, :-1]
x3 = x[:, 1:]
x2 = (x3 + x1) / 2
return x1, x2, x3
def get_lagrange_inputs(self, z: torch.Tensor) -> tuple[
tuple[torch.Tensor, torch.Tensor, torch.Tensor], # x1, x2, x3
tuple[torch.Tensor, torch.Tensor, torch.Tensor], # y1, y2, y3
]:
x1, x2, x3 = self.get_x123(z)
# normalized heights
y_all = self.get_heights(z)
y_positive = F.softplus(y_all[:, :self.n_knots])
y1 = y_positive[:, :-1]
y3 = y_positive[:, 1:]
y2 = y_all[:, self.n_knots:]
return (x1, x2, x3), (y1, y2, y3)
def get_quadratic_coeffs(self, z: torch.Tensor) -> tuple[
tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor, torch.Tensor, torch.Tensor],
]:
(x1, x2, x3), (y1, y2, y3) = self.get_lagrange_inputs(z)
a, b, c = lagrange_coeffs(x1, y1, x2, y2, x3, y3)
integral = batch_f_bar_integral(x1, x3, a, b, c, self.min_f_bar_val)
return (x1, x3), (a / integral, b / integral, c / integral)
def forward(self, z: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
(knot_left, knot_right), (a, b, c) = self.get_quadratic_coeffs(z)
return batch_f_bar(
y.clip(0, 1 - 1e-5), knot_left, knot_right, a, b, c, self.min_f_bar_val,
)
# Path: spice/spice_n2.py
def lagrange_coeffs(
x1: torch.Tensor, y1: torch.Tensor,
x2: torch.Tensor, y2: torch.Tensor,
x3: torch.Tensor, y3: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
denom = (x1 - x2) * (x1 - x3) * (x2 - x3)
a = (x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)) / denom
b = (x1 ** 2 * (y2 - y3) + x3 ** 2 * (y1 - y2) + x2 ** 2 * (y3 - y1)) / denom
c = (x2 ** 2 * (x3 * y1 - x1 * y3) + x2 * (x1 ** 2 * y3 - x3 ** 2 * y1) + x1 * x3 * (x3 - x1) * y2) / denom
return a, b, c
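A quick sanity check (illustration only, assuming lagrange_coeffs above is in scope): the returned coefficients reproduce the three interpolation points exactly.

import torch

x1, x2, x3 = torch.tensor([0.0]), torch.tensor([0.5]), torch.tensor([1.0])
y1, y2, y3 = torch.tensor([1.0]), torch.tensor([2.0]), torch.tensor([0.5])
a, b, c = lagrange_coeffs(x1, y1, x2, y2, x3, y3)
for x, y in [(x1, y1), (x2, y2), (x3, y3)]:
    assert torch.allclose(a * x ** 2 + b * x + c, y, atol=1e-6)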
# Path: spice/spice_n2.py
@torch.no_grad()
def integrate_above_cutoff(
knot_left: torch.Tensor, knot_right: torch.Tensor,
a: torch.Tensor, b: torch.Tensor, c: torch.Tensor,
cutoff: float,
) -> torch.Tensor:
integral_above = batch_f_bar_integral(
knot_left, knot_right, a, b, c - cutoff,
)
intervals_above = get_intervals(knot_left, knot_right, a, b, c, cutoff)
intervals_above_sizes = get_interval_sizes(knot_left, knot_right, *intervals_above).unsqueeze(1)
return integral_above + intervals_above_sizes * cutoff
# Path: spice/spice_n2.py
@torch.no_grad()
def max_f_bar_val(
knot_left: torch.Tensor, knot_right: torch.Tensor,
a: torch.Tensor, b: torch.Tensor, c: torch.Tensor,
) -> torch.Tensor:
# knot heights
left_height = batch_f_bar(knot_left, knot_left, knot_right, a, b, c)
right_height = batch_f_bar(knot_right - 1e-5, knot_left, knot_right, a, b, c)
max_height = torch.maximum(left_height, right_height)
# vertex heights
parab_max = c - b ** 2 / (4 * a)
parab_max_x = -b / (2 * a)
vertex_between_knots = (
(parab_max_x >= knot_left) & (parab_max_x < knot_right)
)
parab_max = torch.where(vertex_between_knots, parab_max, 0)
# merge vertex and knot heights
return torch.maximum(parab_max, max_height).max(dim=1).values
# Path: spice/spice_n2.py
@torch.no_grad()
def find_hpd_cutoff(
knot_left: torch.Tensor, knot_right: torch.Tensor,
a: torch.Tensor, b: torch.Tensor, c: torch.Tensor,
target_integral: float, verbose: bool = False,
max_iter: int = 15,
) -> torch.Tensor:
lower = torch.zeros_like(knot_left[:, 0]).unsqueeze(1)
upper = max_f_bar_val(knot_left, knot_right, a, b, c).unsqueeze(1)
for i in range(max_iter):
mid = (upper + lower) / 2
score_mid = integrate_above_cutoff(knot_left, knot_right, a, b, c, mid)
lower = torch.where(
score_mid > target_integral, mid, lower,
)
upper = torch.where(
score_mid > target_integral, upper, mid,
)
if verbose:
print(f"{i}: mean integral difference = {(score_mid - target_integral).abs().mean():.4f}")
return mid
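find_hpd_cutoff above is a vectorized bisection: raising the cutoff monotonically shrinks the mass above it, so the loop halves the bracket until the mass matches target_integral. A generic standalone sketch of the same pattern (made-up example, not the repository's API):

import torch

def bisect_decreasing(fn, lower, upper, target, max_iter=15):
    # fn must be elementwise non-increasing; returns x with fn(x) close to target
    for _ in range(max_iter):
        mid = (lower + upper) / 2
        val = fn(mid)
        lower = torch.where(val > target, mid, lower)
        upper = torch.where(val > target, upper, mid)
    return (lower + upper) / 2

x = bisect_decreasing(lambda t: torch.exp(-t), torch.zeros(1), torch.full((1,), 10.0), torch.tensor(0.25))
print(x)  # approximately ln(4) = 1.386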
# Path: tests/test_spice_n2.py
import pytest
import torch
from spice.spice_n2 import (
batch_f_bar, batch_f_bar_integral, f_bar, f_bar_integral, ConditionalQuadratic, lagrange_coeffs,
integrate_above_cutoff, max_f_bar_val, find_hpd_cutoff,
)
def test_batch_f_bar():
bsz = 3
K = 5
x = torch.linspace(0, 1 - 1e-3, 7).repeat(bsz, 1)
a = torch.randn((bsz, K - 1))
b = torch.randn((bsz, K - 1))
c = torch.randn((bsz, K - 1))
knots = torch.linspace(0, 1, K).repeat(bsz, 1)
knot_left = knots[:, :-1]
knot_right = knots[:, 1:]
y = batch_f_bar(x, knot_left, knot_right, a, b, c)
assert y.shape == x.shape
@pytest.mark.parametrize("seed", [1, 3, 5])
def test_batch_f_bar_integral(seed: int):
torch.manual_seed(seed)
bsz = 3
K = 5
z_dim = 7
z = torch.randn((bsz, z_dim))
quad = ConditionalQuadratic(condition_dim=z_dim, n_knots=K)
quad.get_heights[-1].bias = torch.nn.Parameter(torch.randn_like(quad.get_heights[-1].bias))
(knot_left, knot_right), (a, b, c) = quad.get_quadratic_coeffs(z)
min_f_bar_val = 0.25
with torch.no_grad():
integral = batch_f_bar_integral(knot_left, knot_right, a, b, c, min_f_bar_val)
assert integral.shape == (bsz, 1)
# make sure integral is correct
x = torch.linspace(0, 1 - 1e-3, 5000).repeat(bsz, 1)
with torch.no_grad():
numeric_integral = batch_f_bar(x, knot_left, knot_right, a, b, c, min_f_bar_val).mean(dim=1)
assert (integral.squeeze() - numeric_integral).abs().max().item() < 5e-3
@pytest.mark.parametrize("seed", [1, 3, 5])
def test_conditional_quadratic(seed: int):
torch.manual_seed(seed)
bsz = 3
K = 5
z_dim = 7
z = torch.randn((bsz, z_dim))
quad = ConditionalQuadratic(condition_dim=z_dim, n_knots=K)
quad.get_heights[-1].bias = torch.nn.Parameter(torch.randn_like(quad.get_heights[-1].bias))
x = torch.linspace(0, 1 - 1e-3, 10000).repeat(bsz, 1)
density = quad(z, x)
assert (density >= 0).all()
assert ((density.mean(dim=1) - 1).abs() < 2e-2).all()
def get_knot_params() -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
knot_left = torch.tensor([
[0.0, 0.5, 0.8],
])
knot_right = torch.tensor([
[0.5, 0.8, 1.0],
])
knot_mid = (knot_right + knot_left) / 2
| height_left = torch.tensor([ |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: nik-sm/com-hom-emg
# Path: com_hom_emg/basic_arch.py
class EmbeddingNetwork(nn.Module):
# TODO - design the structure of this model.
# - consider taking ideas from transformer encoders or other domains.
# - search for papers that extract useful features from EMG
def __init__(
self,
input_channels: int,
input_time_length: int,
feature_dim: int,
normalized_features: bool,
use_preprocessed_data: bool = False,
):
super().__init__()
layers = [
*ResBlock(input_channels, 64),
*ResBlock(64),
*ResBlock(64),
*ResBlock(64),
*ResBlock(64),
*ResBlock(64),
]
# NOTE - preprocessing includes 4x downsample. If no preprocessing, include 2 more blocks of 2x pooling:
if not use_preprocessed_data:
layers.extend([*ResBlock(64), *ResBlock(64)])
layers.append(nn.Flatten())
self.model = nn.Sequential(*layers)
dim_after = self.model(torch.zeros(1, input_channels, input_time_length)).shape[-1]
logger.info(f"Dimension after convolution: {dim_after}")
self.model.append(nn.Linear(dim_after, feature_dim, bias=False))
self.model.append(nn.BatchNorm1d(feature_dim))
self.model.append(nn.ReLU(inplace=True))
self.model.append(nn.Linear(feature_dim, feature_dim))
if normalized_features:
# self.model.append(nn.BatchNorm1d(feature_dim))
self.model.append(UnitNormLayer())
def forward(self, data):
return self.model(data)
# Path: com_hom_emg/basic_arch.py
class UnitNormLayer(nn.Module):
def forward(self, x):
return torch.nn.functional.normalize(x, dim=-1)
# Path: com_hom_emg/conformer.py
class Conformer(nn.Sequential):
def __init__(self, feature_dim: int, normalized_features: bool, emb_size=K, depth=6):
layers = [
Rearrange("batch channel time -> batch () channel time"),
PatchEmbedding(emb_size),
TransformerEncoder(depth, emb_size),
nn.Flatten(),
nn.Linear(1400, feature_dim),
]
if normalized_features:
# layers.append(nn.BatchNorm1d(feature_dim))
layers.append(UnitNormLayer())
super().__init__(*layers)
# Path: com_hom_emg/data.py
class DataModule(LightningDataModule):
@staticmethod
def add_argparse_args(parent_parser):
parser = parent_parser.add_argument_group("DataModule")
parser.add_argument("--fold", type=int, required=True)
parser.add_argument("--n_train_subj", type=int, default=8)
parser.add_argument("--n_val_subj", type=int, default=1)
parser.add_argument("--n_test_subj", type=int, default=1)
parser.add_argument("--batch_size", type=int, default=128)
parser.add_argument("--num_workers", type=int, default=8)
parser.add_argument("--use_preprocessed_data", type=str2bool, default=False)
return parent_parser
def __init__(
self,
*,
# seed and per_subj_data come from cli
seed: int,
per_subj_data: dict,
#
fold: int,
n_train_subj: int,
n_val_subj: int,
n_test_subj: int,
batch_size: int,
num_workers: int,
use_preprocessed_data: bool,
**kw,
):
"""
From N subjects, we select 1 for val, 1 for test, and N-2 for train.
In each set, data are merged and shuffled.
While loading, we distinguish single and double gestures for easier splitting during train steps.
"""
super().__init__()
self.train_set, self.val_set, self.test_set = get_datasets(
per_subj_data, fold, n_train_subj, n_val_subj, n_test_subj, use_preprocessed_data
)
self.batch_size = batch_size
self.num_workers = num_workers
self.seed = seed
self.example_data_shape = self.train_set.tensors[0][0].shape
def get_loader(self, dataset, shuffle: bool):
return DataLoader(
dataset,
shuffle=shuffle,
pin_memory=True,
batch_size=self.batch_size,
num_workers=self.num_workers,
worker_init_fn=seed_worker,
generator=torch.Generator().manual_seed(self.seed),
persistent_workers=True,
)
def train_dataloader(self):
return self.get_loader(self.train_set, shuffle=True)
def val_dataloader(self):
return self.get_loader(self.val_set, shuffle=False)
def test_dataloader(self):
return self.get_loader(self.test_set, shuffle=False)
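seed_worker is passed to the DataLoader above but is not shown in this excerpt. The conventional PyTorch recipe for reproducible multi-worker loading looks like the sketch below; the repository's version may differ.

import random
import numpy as np
import torch

def seed_worker(worker_id: int) -> None:
    # each worker derives its seed from the DataLoader's generator,
    # so numpy and random stay reproducible across runs
    worker_seed = torch.initial_seed() % 2 ** 32
    np.random.seed(worker_seed)
    random.seed(worker_seed)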
# Path: com_hom_emg/data.py
def get_per_subj_data():
path = PROJECT_PATH / "data" / "combination-gesture-dataset" / "python"
per_subj_data = {}
for subj_idx in range(10):
per_subj_data[subj_idx] = {
"data": np.load(path / f"subj{subj_idx}/data.npy"),
"labels": np.load(path / f"subj{subj_idx}/labels.npy"),
}
return per_subj_data
# Path: com_hom_emg/data.py
def shuffle_together(*tensors):
"""Shuffle tensors together"""
assert all(isinstance(x, torch.Tensor) for x in tensors)
assert all(len(x) == len(tensors[0]) for x in tensors)
p = torch.randperm(len(tensors[0]))
return [x[p] for x in tensors]
# Path: com_hom_emg/loss.py
class TripletCentroids(nn.Module):
"""
Randomly initialize a centroid for each class.
For each item, form triplets by comparing it to class centroids
(there is exactly one positive centroid, and one randomly chosen negative centroid)
Update centroids gradually using momentum.
"""
def __init__(self, margin, feature_dim: int, device: str, momentum=0.9):
super().__init__()
self.margin = margin
self.momentum = momentum
# TODO - init the centroids farther apart or closer together?
#
# https://math.stackexchange.com/questions/917292/expected-distance-between-two-vectors-that-belong-to-two-different-gaussian-dist # noqa
# Expected distance between two independent gaussian vectors of dimension D is:
# E[ || x - y || ^ 2 ] = || mu_x - mu_y || ^ 2 + tr(Cov_x + Cov_y)
# torch.randn(n_items, n_features) * sigma has (approximately) mean = 0,
# and spherical covariance = sigma**2 * torch.eye(n_features)
# Expected distance between any pair of centroids will be:
#
# dist = 0 + trace(Cov_1 + Cov_2)
#      = trace(2 * sigma**2 * eye(n_features))
#      = 2 * sigma**2 * n_features
self.keys = {torch.tensor([d, m], device=device, requires_grad=False) for (d, m) in product(range(4), range(4))}
self.real_centroids = {k: torch.randn((feature_dim,), device=device, requires_grad=False) for k in self.keys}
self.fake_centroids = {k: torch.randn((feature_dim,), device=device, requires_grad=False) for k in self.keys}
def forward(
self,
real_double_features: torch.Tensor,
fake_double_features: torch.Tensor,
real_double_labels: torch.Tensor,
fake_double_labels: torch.Tensor,
):
assert len(real_double_features) == len(real_double_labels)
assert len(fake_double_features) == len(fake_double_labels)
assert len(real_double_features) > 0
assert len(fake_double_features) > 0
# Loop over real classes, computing triplet losses
# In first iteration, anchor items all belong to c0.
# Next iter, all anchors belong to c1, etc.
# For each anchor item, just compute 1 triplet.
anchors, positives, negatives = [], [], []
for label in self.keys:
anchor_idx = real_double_labels.eq(label).all(-1)
_anchors = real_double_features[anchor_idx]
if len(_anchors) == 0:
continue
# Use the matching centroid from fake items as positive
positive_centroid = self.fake_centroids[label]
# Sample 1 negative centroid (with replacement) for each anchor item
negative_classes = list(self.keys - {label})
negative_centroid_labels = random.choices(negative_classes, k=len(_anchors))
for a, n in zip(_anchors, negative_centroid_labels):
negative_centroid = self.fake_centroids[n]
anchors.append(a)
positives.append(positive_centroid)
negatives.append(negative_centroid)
# Loop over fake classes as anchors, accumulating into the same triplet lists
for label in self.keys:
anchor_idx = fake_double_labels.eq(label).all(-1)
_anchors = fake_double_features[anchor_idx]
if len(_anchors) == 0:
continue
# Use the matching centroid from real items as positive
positive_centroid = self.real_centroids[label]
# Sample 1 negative centroid (with replacement) for each anchor item
negative_classes = list(self.keys - {label})
negative_centroid_labels = random.choices(negative_classes, k=len(_anchors))
for a, n in zip(_anchors, negative_centroid_labels):
negative_centroid = self.real_centroids[n]
anchors.append(a)
positives.append(positive_centroid)
negatives.append(negative_centroid)
if len(anchors) == 0:
logger.warning("No triplets found")
loss = torch.tensor(0.0)
else:
anchors = torch.stack(anchors)
positives = torch.stack(positives)
negatives = torch.stack(negatives)
# Compute loss
loss = F.triplet_margin_loss(anchors, positives, negatives, margin=self.margin)
# Update centroids with momentum
# (update after computing loss; same order as in SGD with momentum)
with torch.no_grad():
for label, prev in self.real_centroids.items():
match_idx = real_double_labels.eq(label).all(-1)
if match_idx.sum() == 0:
continue
curr = real_double_features[match_idx].mean(0).detach()
self.real_centroids[label] = self.momentum * prev + (1 - self.momentum) * curr
for label, prev in self.fake_centroids.items():
match_idx = fake_double_labels.eq(label).all(-1)
if match_idx.sum() == 0:
continue
curr = fake_double_features[match_idx].mean(0).detach()
self.fake_centroids[label] = self.momentum * prev + (1 - self.momentum) * curr
return loss
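The expected-distance comment in __init__ above can be checked numerically: for independent standard-normal vectors of dimension d, E[||x - y||^2] = 2d. A small Monte-Carlo illustration (not part of the repository):

import torch

torch.manual_seed(0)
d = 128
x = torch.randn(50_000, d)
y = torch.randn(50_000, d)
print(((x - y) ** 2).sum(dim=1).mean())  # approximately 2 * d = 256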
# Path: com_hom_emg/loss.py
class TripletLoss(nn.Module):
"""A random positive and a random negative item are used for a triplet"""
def __init__(self, margin: float, triplets_per_item: int = 1):
super().__init__()
self.margin = margin
self.triplets_per_item = triplets_per_item
def forward(
self,
real_double_features: torch.Tensor,
fake_double_features: torch.Tensor,
real_double_labels: torch.Tensor,
fake_double_labels: torch.Tensor,
):
assert len(real_double_features) == len(real_double_labels)
assert len(fake_double_features) == len(fake_double_labels)
assert len(real_double_features) > 0
assert len(fake_double_features) > 0
embeddings = torch.cat([real_double_features, fake_double_features], dim=0)
labels = torch.cat([real_double_labels, fake_double_labels], dim=0)
device = embeddings.device
is_real = torch.cat(
[
torch.ones(len(real_double_labels), device=device),
torch.zeros(len(fake_double_labels), device=device),
],
)
pairwise_dist = torch.cdist(embeddings, embeddings)
# Masks: for each row, which items are valid as a positive or negative item
# Positive items: same label, opposite realness
# Negative items: diff label, opposite realness
positive_mask, negative_mask = get_masks(labels, is_real)
positive_mask = positive_mask.int()
negative_mask = negative_mask.int()
# Subset to rows with at least K positive and at least K negative so we can form K triplets per row
subset_idx = (positive_mask.sum(1) >= self.triplets_per_item) & (negative_mask.sum(1) >= self.triplets_per_item)
if subset_idx.sum() == 0:
logger.warning(f"Not enough triplets per item (wanted: {self.triplets_per_item})")
return torch.tensor(0.0).to(embeddings.device)
pairwise_dist = pairwise_dist[subset_idx, :]
positive_mask = positive_mask[subset_idx, :]
negative_mask = negative_mask[subset_idx, :]
# The masks contain all "0" and "1" integers.
# topk returns indices of first K "1" values in each row
# Since batch contains shuffled items, the first K neighbors are random
first_k_positive_idx = positive_mask.topk(self.triplets_per_item, dim=1, sorted=False).indices
first_k_negative_idx = negative_mask.topk(self.triplets_per_item, dim=1, sorted=False).indices
anchor_positive_dist = pairwise_dist.gather(1, first_k_positive_idx)
anchor_negative_dist = pairwise_dist.gather(1, first_k_negative_idx)
triplet_loss = F.relu(anchor_positive_dist - anchor_negative_dist + self.margin, inplace=True).mean()
return triplet_loss
# Path: com_hom_emg/loss.py
class TripletLossHardMining(nn.Module):
"""The farthest positive and the closest negative item are used for a triplet"""
# see:
# https://omoindrot.github.io/triplet-loss
# https://arxiv.org/abs/1703.07737
# https://github.com/NegatioN/OnlineMiningTripletLoss/blob/master/online_triplet_loss/losses.py
def __init__(self, margin: float):
super().__init__()
self.margin = margin
def forward(
self,
real_double_features: torch.Tensor,
fake_double_features: torch.Tensor,
real_double_labels: torch.Tensor,
fake_double_labels: torch.Tensor,
):
assert len(real_double_features) == len(real_double_labels)
assert len(fake_double_features) == len(fake_double_labels)
assert len(real_double_features) > 0
assert len(fake_double_features) > 0
embeddings = torch.cat([real_double_features, fake_double_features], dim=0)
labels = torch.cat([real_double_labels, fake_double_labels], dim=0)
device = embeddings.device
is_real = torch.cat(
[
torch.ones(len(real_double_labels), device=device),
torch.zeros(len(fake_double_labels), device=device),
],
)
pairwise_dist = torch.cdist(embeddings, embeddings)
# Masks: for each row, which items are valid as a positive or negative item
# Positive items: same label, opposite realness
# Negative items: diff label, opposite realness
positive_mask, negative_mask = get_masks(labels, is_real)
positive_mask = positive_mask.float()
negative_mask = negative_mask.float()
# Subset to rows with at least 1 positive and at least 1 negative so we can form a triplet
subset_idx = (positive_mask.sum(1) > 0) & (negative_mask.sum(1) > 0)
if subset_idx.sum() == 0:
return torch.tensor(0.0).to(embeddings.device)
pairwise_dist = pairwise_dist[subset_idx, :]
positive_mask = positive_mask[subset_idx, :]
negative_mask = negative_mask[subset_idx, :]
# Use mask to zero out any distances where (a, p) not valid.
# (a, p) is valid if label(a) == label(p) and is_real(a) != is_real(p)
# Thus when we select the largest dist, we'll select a valid positive
anchor_positive_dist = positive_mask * pairwise_dist
# shape (batch_size, 1)
hardest_positive_dist, _ = anchor_positive_dist.max(1, keepdim=True)
# For each anchor, get the hardest negative
# We add the maximum value in each row to the invalid negatives (label(a) == label(n))
# Thus when we select the minimum dist, we'll select a valid negative
max_anchor_negative_dist, _ = pairwise_dist.max(1, keepdim=True)
anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - negative_mask)
# shape (batch_size,)
hardest_negative_dist, _ = anchor_negative_dist.min(1, keepdim=True)
# Combine biggest d(a, p) and smallest d(a, n) into final triplet loss
triplet_loss = F.relu(hardest_positive_dist - hardest_negative_dist + self.margin, inplace=True).mean()
return triplet_loss
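Both triplet losses above rely on get_masks, which is not shown in this excerpt. Based on the comments ("same label, opposite realness" for positives, "different label, opposite realness" for negatives), a plausible sketch is the following; the actual helper in com_hom_emg/loss.py may differ in details.

import torch

def get_masks(labels: torch.Tensor, is_real: torch.Tensor):
    # labels: N x 2 (direction, modifier); is_real: N (1.0 real, 0.0 generated)
    same_label = (labels.unsqueeze(0) == labels.unsqueeze(1)).all(dim=-1)
    opposite_realness = is_real.unsqueeze(0) != is_real.unsqueeze(1)
    positive_mask = same_label & opposite_realness
    negative_mask = ~same_label & opposite_realness
    return positive_mask, negative_mask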
# Path: com_hom_emg/scoring.py
def get_combo_conf_mat(y_true_2d, y_pred_2d, normalize=True):
"""We get a confusion matrix of shape (25, 25). Row is true class, col is predicted.
Entries are arranged like this:
(D1, None), ..., (D4, None), (None, M1), ..., (None, M4), (D1, M1), ...
(D1, M4), (D2, M1), ... (D2, M4), ... (D4, M4), (None, None)
where D1 ... D4 are directions in order of appearance from DIRECTION_GESTURES
and M1 ... M4 are modifiers in order of appearance from MODIFIER_GESTURES.
This means the first 4 rows are each "direction-only" label, next 4 are "modifier-only" labels."""
cm = np.zeros((len(CANONICAL_COORDS), len(CANONICAL_COORDS)))
for yt, yp in zip(y_true_2d, y_pred_2d):
cm[CANONICAL_COORDS.index(tuple(yt)), CANONICAL_COORDS.index(tuple(yp))] += 1
if normalize:
# NOTE - result may contain nans - use nanmean later
with np.errstate(all="ignore"): # Ignore division by zero for empty rows
cm /= cm.sum(axis=-1, keepdims=True)
return cm
# Path: com_hom_emg/utils.py
PROJECT_PATH = Path(__file__).parent.parent
# Path: com_hom_emg/model.py
from copy import deepcopy
from itertools import chain, product
from pathlib import Path
from loguru import logger
from pytorch_lightning.loggers import TensorBoardLogger
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from torchmetrics.functional import accuracy
from vit_pytorch.simple_vit_1d import SimpleViT
from .basic_arch import EmbeddingNetwork, UnitNormLayer
from .conformer import Conformer
from .data import DataModule, get_per_subj_data, shuffle_together
from .loss import TripletCentroids, TripletLoss, TripletLossHardMining
from .scoring import get_combo_conf_mat
from .utils import PROJECT_PATH
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
class InsufficientDataError(Exception):
...
class DummyIdentity(nn.Module):
# A null embedding. Has a single (unused) parameter to easily use in the same pl training loop
def __init__(self):
super().__init__()
self.param = nn.Parameter(torch.tensor(0.0))
def forward(self, x):
return x.flatten(1)
class MLPClf(nn.Sequential):
def __init__(self, input_dim, output_dim):
layers = [
nn.Linear(input_dim, input_dim * 2, bias=False),
nn.BatchNorm1d(input_dim * 2),
nn.ReLU(inplace=True),
nn.Dropout1d(0.05),
nn.Linear(input_dim * 2, input_dim, bias=False),
nn.BatchNorm1d(input_dim),
nn.ReLU(inplace=True),
nn.Dropout1d(0.05),
nn.Linear(input_dim, output_dim),
]
super().__init__(*layers)
class Avg(nn.Module):
def forward(self, x1, x2, _y1, _y2):
# Note that vector average is elementwise; thus we don't care
# if we have a pair of single vectors or a pair of batches
return (x1 + x2) / 2
class MLPCombine(nn.Module):
def __init__(self, feature_dim):
super().__init__()
self.layer = nn.Sequential(
# Input takes 2 feature vectors, and 2 labels (each one-hot with 5 classes)
nn.Linear(feature_dim * 2 + 5 * 2, feature_dim, bias=False),
nn.BatchNorm1d(feature_dim),
nn.ReLU(inplace=True),
nn.Dropout1d(0.05),
nn.Linear(feature_dim, feature_dim, bias=False),
nn.BatchNorm1d(feature_dim),
nn.ReLU(inplace=True),
nn.Dropout1d(0.05),
nn.Linear(feature_dim, feature_dim),
)
def forward(self, x1, x2, y1, y2):
y1 = F.one_hot(y1, num_classes=5)
y2 = F.one_hot(y2, num_classes=5)
avg = (x1 + x2) / 2
mlp_out = self.layer(torch.cat((x1, x2, y1, y2), dim=-1))
return avg + mlp_out
class CombinePairs(nn.Module):
def __init__(self, combine_fn: nn.Module, normalized_features: bool):
super().__init__()
self.normalized_features = normalized_features
self.combine_fn = combine_fn
def forward(self, x, y):
# Expects data and labels from single gestures
# Labels have the form (direction, modifier)
# where direction in 0, 1, 2, 3 is active, and 4 is NoDir
# same for modifier
device = x.device
dir_idx = y[:, 1] == 4 # When modifier is NoMod
mod_idx = y[:, 0] == 4 # When direction is NoDir
x_dir = x[dir_idx]
y_dir = y[dir_idx, 0]
x_mod = x[mod_idx]
y_mod = y[mod_idx, 1]
if len(x_dir) * len(x_mod) <= 1:
raise InsufficientDataError()
all_x1, all_x2, all_y1, all_y2 = [], [], [], []
for (x1, y1), (x2, y2) in product(zip(x_dir, y_dir), zip(x_mod, y_mod)):
all_x1.append(x1)
all_x2.append(x2)
all_y1.append(y1)
all_y2.append(y2)
all_x1 = torch.stack(all_x1)
all_x2 = torch.stack(all_x2)
all_y1 = torch.stack(all_y1).to(device)
all_y2 = torch.stack(all_y2).to(device)
x_aug = self.combine_fn(all_x1, all_x2, all_y1, all_y2)
y_aug = torch.stack((all_y1, all_y2), dim=-1)
if self.normalized_features:
x_aug = F.normalize(x_aug, dim=-1)
return x_aug, y_aug
def str2bool(s):
if s.lower() in ("yes", "true", "t", "y", "1"):
return True
elif s.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ValueError("Boolean value expected.")
def get_noise(x, desired_SNR):
x_std = x.std()
# SNR = 10 * log10 ( (signal_power) / (noise_power) )
# where signal_power = data_std**2 and noise_power = noise_std**2,
# and SNR is passed as argparse param
| noise_std = x_std / (10 ** (desired_SNR / 20)) |
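The dB arithmetic in the comments above can be checked in isolation: with signal_power = x_std**2 and noise_power = noise_std**2, SNR = 20 * log10(x_std / noise_std), so noise_std = x_std / 10**(SNR / 20). A standalone illustration (not part of the repository):

import torch

x = torch.randn(100_000)
target_snr_db = 20.0
noise_std = x.std() / (10 ** (target_snr_db / 20))
noise = torch.randn_like(x) * noise_std
measured_snr = 10 * torch.log10(x.std() ** 2 / noise.std() ** 2)
print(measured_snr)  # approximately 20 dB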
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: openai/weak-to-strong
# Path: weak_to_strong/common.py
def get_tokenizer(model_name: str):
"""
This function returns a tokenizer based on the model name.
Parameters:
model_name: The name of the model for which the tokenizer is needed.
Returns:
A tokenizer for the specified model.
"""
return AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# Path: weak_to_strong/datasets.py
VALID_DATASETS: list[str] = list(_REGISTRY.keys())
# Path: weak_to_strong/datasets.py
def load_dataset(ds_name: str, seed: int = 0, split_sizes: Optional[dict] = None):
if split_sizes is None:
split_sizes = dict(train=None, test=None)
if ds_name not in _REGISTRY:
raise ValueError(f"Unknown dataset {ds_name}, please register")
cfg = _REGISTRY[ds_name]
results = {}
for split, n_docs in split_sizes.items():
ds = cfg.loader(split)
try:
ds = ds.select(range(n_docs))
except IndexError as e:
print(f"Warning {ds_name} has less than {n_docs} docs, using all: {e}")
ds = ds.map(functools.partial(cfg.formatter, rng=Random(seed)))
ds = ds.map(
lambda ex: {"soft_label": [1 - float(ex["hard_label"]), float(ex["hard_label"])]}
)
ds = ds.shuffle(seed=seed) # shuffling a bit pointless for test set but wtv
results[split] = ds
return results
# Path: weak_to_strong/datasets.py
def tokenize_dataset(
raw_ds: HfDataset,
tokenizer: Callable,
max_ctx: int,
):
"""
This function prepares the dataset for training. It takes the raw dataset, a tokenizer,
and a maximum context length.
Parameters:
raw_ds: The raw dataset to be processed.
tokenizer: The tokenizer to be used on the formatted dataset.
max_ctx: The maximum context length for the tokenizer.
Returns:
ds: The processed and shuffled dataset ready for training.
"""
def process_function(res):
toks = tokenizer(res["txt"])
return dict(
input_ids=toks["input_ids"],
)
ds = raw_ds.map(process_function, batched=False).filter(lambda x: len(x["input_ids"]) < max_ctx)
return ds
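Putting the helpers above together, a typical (hypothetical) preprocessing call chain would look like the sketch below; the dataset name and split sizes are illustrative, and running it requires the corresponding Hugging Face assets to be downloadable.

tokenizer = get_tokenizer("gpt2")
splits = load_dataset("sciq", seed=0, split_sizes=dict(train=1000, test=200))
train_ds = tokenize_dataset(splits["train"], tokenizer, max_ctx=1024)
test_ds = tokenize_dataset(splits["test"], tokenizer, max_ctx=1024)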
# Path: weak_to_strong/loss.py
class logconf_loss_fn(LossFnBase):
"""
This class defines a custom loss function for log confidence.
Attributes:
aux_coef: A float indicating the auxiliary coefficient.
warmup_frac: A float indicating the fraction of total training steps for warmup.
"""
def __init__(
self,
aux_coef: float = 0.5,
warmup_frac: float = 0.1, # in terms of fraction of total training steps
):
self.aux_coef = aux_coef
self.warmup_frac = warmup_frac
def __call__(
self,
logits: torch.Tensor,
labels: torch.Tensor,
step_frac: float,
) -> torch.Tensor:
logits = logits.float()
labels = labels.float()
coef = 1.0 if step_frac > self.warmup_frac else step_frac
coef = coef * self.aux_coef
preds = torch.softmax(logits, dim=-1)
mean_weak = torch.mean(labels, dim=0)
assert mean_weak.shape == (2,)
threshold = torch.quantile(preds[:, 0], mean_weak[1])
strong_preds = torch.cat(
[(preds[:, 0] >= threshold)[:, None], (preds[:, 0] < threshold)[:, None]],
dim=1,
)
target = labels * (1 - coef) + strong_preds.detach() * coef
loss = torch.nn.functional.cross_entropy(logits, target, reduction="none")
return loss.mean()
# Path: weak_to_strong/loss.py
class product_loss_fn(LossFnBase):
"""
This class defines a custom loss function for product of predictions and labels.
Attributes:
alpha: A float indicating how much to weigh the weak model.
beta: A float indicating how much to weigh the strong model.
warmup_frac: A float indicating the fraction of total training steps for warmup.
"""
def __init__(
self,
alpha: float = 1.0, # how much to weigh the weak model
beta: float = 1.0, # how much to weigh the strong model
warmup_frac: float = 0.1, # in terms of fraction of total training steps
):
self.alpha = alpha
self.beta = beta
self.warmup_frac = warmup_frac
def __call__(
self,
logits: torch.Tensor,
labels: torch.Tensor,
step_frac: float,
) -> torch.Tensor:
preds = torch.softmax(logits, dim=-1)
target = torch.pow(preds, self.beta) * torch.pow(labels, self.alpha)
target /= target.sum(dim=-1, keepdim=True)
target = target.detach()
loss = torch.nn.functional.cross_entropy(logits, target, reduction="none")
return loss.mean()
# Path: weak_to_strong/loss.py
class xent_loss(LossFnBase):
def __call__(
self, logits: torch.Tensor, labels: torch.Tensor, step_frac: float
) -> torch.Tensor:
"""
This function calculates the cross entropy loss between logits and labels.
Parameters:
logits: The predicted values.
labels: The actual values.
step_frac: The fraction of total training steps completed.
Returns:
The mean of the cross entropy loss.
"""
loss = torch.nn.functional.cross_entropy(logits, labels)
return loss.mean()
# Path: weak_to_strong/train.py
class ModelConfig:
name: str
default_lr: float
eval_batch_size: int
custom_kwargs: Optional[dict] = None
gradient_checkpointing: bool = False
model_parallel: bool = False
default_optimizer: str = "adam"
# Path: weak_to_strong/train.py
def train_and_save_model(
model_config: ModelConfig,
train_ds: datasets.Dataset,
test_ds: datasets.Dataset,
inference_ds: Optional[datasets.Dataset] = None,
*,
batch_size: int,
lr: float,
epochs: int,
eval_batch_size: Optional[int] = None,
minibatch_size_per_device: Optional[int] = None,
save_path: Optional[str] = None,
loss_fn: Callable = xent_loss,
label: str = "default",
force_retrain: bool = False,
train_with_dropout: bool = False,
linear_probe: bool = False,
lr_schedule: str = "constant",
optimizer_name: str = "adam",
eval_every: Optional[int] = None,
):
if eval_batch_size is None:
eval_batch_size = batch_size
if minibatch_size_per_device is None:
minibatch_size_per_device = 1
gradient_checkpointing = model_config.gradient_checkpointing
custom_kwargs = model_config.custom_kwargs or {}
def maybe_load_model(model):
if os.path.exists(os.path.join(save_path, "results.pkl")) and not force_retrain:
print("loading from", save_path)
checkpoint_path = os.path.join(save_path, "pytorch_model.bin")
if not os.path.exists(checkpoint_path):
# Assume this means we have a sharded checkpoint, and load it appropriately
load_sharded_checkpoint(model, checkpoint_path)
else:
state_dict = torch.load(os.path.join(save_path, "pytorch_model.bin"))
state_dict = {
k.replace("transformer.module", "transformer"): v
for (k, v) in state_dict.items()
}
custom_kwargs["state_dict"] = state_dict
return True
return False
already_trained = False
# Load the model
if model_config.model_parallel:
assert torch.cuda.device_count() > 1, f"you might want more gpus for {model_config.name}"
model = TransformerWithHead.from_pretrained(
model_config.name,
num_labels=2,
device_map="auto",
linear_probe=linear_probe,
**custom_kwargs,
)
already_trained = maybe_load_model(model)
# slight misnomer, more like minibatch_size_per_dp_replica
minibatch_size = minibatch_size_per_device
else:
model = TransformerWithHead.from_pretrained(
model_config.name, num_labels=2, linear_probe=linear_probe, **custom_kwargs
).to("cuda")
already_trained = maybe_load_model(model)
# data parallel: currently not supported with model parallel
if torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model, output_device=0)
minibatch_size = min(minibatch_size_per_device * torch.cuda.device_count(), batch_size)
print(
"Using",
torch.cuda.device_count(),
"GPUs, setting minibatch_size to",
minibatch_size,
)
else:
minibatch_size = minibatch_size_per_device
if already_trained:
test_results = eval_model_acc(model, test_ds, eval_batch_size)
else:
start = time.time()
test_results = train_model(
model,
train_ds,
batch_size,
lr=lr,
epochs=epochs,
eval_ds=test_ds,
gradient_checkpointing=gradient_checkpointing,
loss_fn=loss_fn,
eval_batch_size=eval_batch_size,
eval_every=eval_every,
minibatch_size=minibatch_size,
train_with_dropout=train_with_dropout,
lr_schedule=lr_schedule,
optimizer_name=optimizer_name,
)
print("Model training took", time.time() - start, "seconds")
if save_path:
# Note: If the model is wrapped by DataParallel, we need to unwrap it before saving
(model if hasattr(model, "save_pretrained") else model.module).save_pretrained(
save_path
)
print("saved", save_path)
inference_results = None
if inference_ds:
inference_results = eval_model_acc(model, inference_ds, eval_batch_size)
logger.logkv("inference_accuracy", np.mean([r["acc"] for r in inference_results]))
if save_path:
with open(os.path.join(save_path, "results.pkl"), "wb") as f:
pickle.dump(
{
"avg_acc_test": float(np.mean([r["acc"] for r in test_results])),
"avg_acc_inference": float(
np.mean([r["acc"] for r in inference_results] if inference_results else [])
),
"test_results": test_results,
"inference_results": inference_results if inference_results else [],
},
f,
)
# try to clean up memory
clear_mem()
logger.shutdown()
return test_results, inference_results
# Path: train_simple.py
import json
import os
import random
import subprocess
import fire
import numpy as np
import torch
import weak_to_strong.logger as logger
from typing import Dict, List, Optional
from datasets import load_dataset, load_from_disk
from weak_to_strong.common import get_tokenizer
from weak_to_strong.datasets import (VALID_DATASETS, load_dataset,
tokenize_dataset)
from weak_to_strong.loss import logconf_loss_fn, product_loss_fn, xent_loss
from weak_to_strong.train import ModelConfig, train_and_save_model
gradient_checkpointing=True,
# Should use model_parallel on V100s (note: ironically if you have a single V100 it should run,
# but if you have multiple it won't run without model_parallel because of the overhead of data
# parallel training).
model_parallel=(
torch.cuda.get_device_properties(0).total_memory < 35e9
and torch.cuda.device_count() > 1
),
),
ModelConfig(
name="Qwen/Qwen-1_8B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=(
torch.cuda.get_device_properties(0).total_memory < 35e9
and torch.cuda.device_count() > 1
),
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
"revision": "5fde88dff770a7d036847211f5d9d9705f0caa69",
},
),
ModelConfig(
name="Qwen/Qwen-7B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
"revision": "d4efd21e866b9cb3466cb65b963933f5e98016d1",
},
),
ModelConfig(
name="Qwen/Qwen-14B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without bf16 support and without many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
"revision": "8be2854218fea9054331e217fd26a06f3fd02004",
},
),
ModelConfig(
name="Qwen/Qwen-72B",
default_lr=1e-5,
eval_batch_size=1,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without bf16 support and many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
"revision": "fec78c0e3b3b10dd9f0ce775c34a686a3255a7d1",
},
# This model is really big, save space by using adafactor.
# Note that even then it will take up ~60GB per GPU on an 8-GPU machine.
default_optimizer="adafactor",
),
]
MODELS_DICT: Dict[str, ModelConfig] = {
model_config.name: model_config for model_config in MODEL_CONFIGS
}
loss_dict = {
"logconf": logconf_loss_fn(),
"product": product_loss_fn(),
"xent": xent_loss(),
}
VALID_LOSSES: List[str] = list(loss_dict.keys())
def get_config_foldername(config: dict) -> str:
def shorten_key(key: str) -> str:
return "".join(word[0] for word in key.split("_"))
def shorten_value(value) -> str:
if isinstance(value, bool):
return "1" if value else "0"
elif isinstance(value, str):
value = value.split("/")[-1]
if "_" in value:
return "_".join(word[:4] for word in value.split("_"))
else:
return value
else:
return str(value)
return "-".join(f"{shorten_key(k)}={shorten_value(v)}" for k, v in sorted(config.items()))
def main(
batch_size: int = 32,
max_ctx: int = 1024,
ds_name: str = "sciq",
loss: str = "xent",
n_docs: int = 20000,
n_test_docs: int = 10000,
model_size: str = "gpt2",
lr: Optional[float] = None,
optim: Optional[str] = None,
epochs: int = 2,
force_retrain: bool = False,
seed: int = 0,
minibatch_size_per_device: Optional[float] = None,
train_with_dropout: bool = False,
results_folder: str = "/tmp/results",
| linear_probe: bool = False, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: linyiLYi/voice-assistant
# Path: whisper/audio.py
FRAMES_PER_SECOND = SAMPLE_RATE // HOP_LENGTH # 10ms per audio frame
# Path: whisper/audio.py
HOP_LENGTH = 160
# Path: whisper/audio.py
N_FRAMES = N_SAMPLES // HOP_LENGTH # 3000 frames in a mel spectrogram input
# Path: whisper/audio.py
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000 samples in a 30-second chunk
# Path: whisper/audio.py
SAMPLE_RATE = 16000
# Path: whisper/audio.py
def log_mel_spectrogram(
audio: Union[str, np.ndarray],
n_mels: int = 80,
padding: int = 0,
):
"""
Compute the log-Mel spectrogram of an audio waveform.
Parameters
----------
audio: Union[str, np.ndarray, mx.array], shape = (*)
The path to audio or either a NumPy or mlx array containing the audio waveform in 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
padding: int
Number of zero samples to pad to the right
Returns
-------
mx.array, shape = (80, n_frames)
An array that contains the Mel spectrogram
"""
device = mx.default_device()
mx.set_default_device(mx.cpu)
if not isinstance(audio, mx.array):
if isinstance(audio, str):
audio = load_audio(audio)
audio = mx.array(audio)
if padding > 0:
audio = mx.pad(audio, (0, padding))
window = hanning(N_FFT)
freqs = stft(audio, window, nperseg=N_FFT, noverlap=HOP_LENGTH)
magnitudes = freqs[:-1, :].abs().square()
filters = mel_filters(n_mels)
mel_spec = magnitudes @ filters.T
log_spec = mx.maximum(mel_spec, 1e-10).log10()
log_spec = mx.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
mx.set_default_device(device)
return log_spec
# Path: whisper/audio.py
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if array.shape[axis] > length:
sl = [slice(None)] * array.ndim
sl[axis] = slice(0, length)
array = array[tuple(sl)]
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
pad_fn = mx.pad if isinstance(array, mx.array) else np.pad
array = pad_fn(array, pad_widths)
return array
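A small usage illustration of pad_or_trim with plain NumPy arrays (the mx.array branch behaves the same way); the length here is made up rather than the default N_SAMPLES.

import numpy as np

short = np.ones(10, dtype=np.float32)
long = np.ones(20, dtype=np.float32)
print(pad_or_trim(short, length=16).shape)  # (16,) zero-padded on the right
print(pad_or_trim(long, length=16).shape)   # (16,) truncated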
# Path: whisper/decoding.py
class DecodingOptions:
# whether to perform X->X "transcribe" or X->English "translate"
task: str = "transcribe"
# language that the audio is in; uses detected language if None
language: Optional[str] = None
# sampling-related options
temperature: float = 0.0
sample_len: Optional[int] = None # maximum number of tokens to sample
best_of: Optional[int] = None # number of independent sample trajectories, if t > 0
beam_size: Optional[int] = None # number of beams in beam search, if t == 0
patience: Optional[float] = None # patience in beam search (arxiv:2204.05424)
# "alpha" in Google NMT, or None for length norm, when ranking generations
# to select which to return among the beams or best-of-N samples
length_penalty: Optional[float] = None
# text or tokens to feed as the prompt or the prefix; for more info:
# https://github.com/openai/whisper/discussions/117#discussioncomment-3727051
prompt: Optional[Union[str, List[int]]] = None # for the previous context
prefix: Optional[Union[str, List[int]]] = None # to prefix the current context
# list of tokens ids (or comma-separated token ids) to suppress
# "-1" will suppress a set of symbols as defined in `tokenizer.non_speech_tokens()`
suppress_tokens: Optional[Union[str, Iterable[int]]] = "-1"
suppress_blank: bool = True # this will suppress blank outputs
# timestamp sampling options
without_timestamps: bool = False # use <|notimestamps|> to sample text tokens only
max_initial_timestamp: Optional[float] = 1.0
# implementation details
fp16: bool = True # use fp16 for most of the calculation
# Path: whisper/decoding.py
class DecodingResult:
audio_features: mx.array
language: str
language_probs: Optional[Dict[str, float]] = None
tokens: List[int] = field(default_factory=list)
text: str = ""
avg_logprob: float = np.nan
no_speech_prob: float = np.nan
temperature: float = np.nan
compression_ratio: float = np.nan
# Path: whisper/load_models.py
def load_model(
path_or_hf_repo: str,
dtype: mx.Dtype = mx.float32,
) -> whisper.Whisper:
model_path = Path(path_or_hf_repo)
if not model_path.exists():
model_path = Path(
snapshot_download(
repo_id=path_or_hf_repo
)
)
with open(str(model_path / "config.json"), "r") as f:
config = json.loads(f.read())
config.pop("model_type", None)
quantization = config.pop("quantization", None)
model_args = whisper.ModelDimensions(**config)
weights = mx.load(str(model_path / "weights.npz"))
weights = tree_unflatten(list(weights.items()))
model = whisper.Whisper(model_args, dtype)
if quantization is not None:
nn.QuantizedLinear.quantize_module(model, **quantization)
model.update(weights)
mx.eval(model.parameters())
return model
# Path: whisper/timing.py
def add_word_timestamps(
*,
segments: List[dict],
model: "Whisper",
tokenizer: Tokenizer,
mel: mx.array,
num_frames: int,
prepend_punctuations: str = "\"'“¿([{-",
append_punctuations: str = "\"'.。,,!!??::”)]}、",
last_speech_timestamp: float,
**kwargs,
):
if len(segments) == 0:
return
text_tokens_per_segment = [
[token for token in segment["tokens"] if token < tokenizer.eot]
for segment in segments
]
text_tokens = list(itertools.chain.from_iterable(text_tokens_per_segment))
alignment = find_alignment(model, tokenizer, text_tokens, mel, num_frames, **kwargs)
word_durations = np.array([t.end - t.start for t in alignment])
word_durations = word_durations[word_durations.nonzero()]
median_duration = np.median(word_durations) if len(word_durations) > 0 else 0.0
median_duration = min(0.7, float(median_duration))
max_duration = median_duration * 2
# hack: truncate long words at sentence boundaries.
# a better segmentation algorithm based on VAD should be able to replace this.
if len(word_durations) > 0:
sentence_end_marks = ".。!!??"
# ensure words at sentence boundaries are not longer than twice the median word duration.
for i in range(1, len(alignment)):
if alignment[i].end - alignment[i].start > max_duration:
if alignment[i].word in sentence_end_marks:
alignment[i].end = alignment[i].start + max_duration
elif alignment[i - 1].word in sentence_end_marks:
alignment[i].start = alignment[i].end - max_duration
merge_punctuations(alignment, prepend_punctuations, append_punctuations)
time_offset = segments[0]["seek"] * HOP_LENGTH / SAMPLE_RATE
word_index = 0
for segment, text_tokens in zip(segments, text_tokens_per_segment):
saved_tokens = 0
words = []
while word_index < len(alignment) and saved_tokens < len(text_tokens):
timing = alignment[word_index]
if timing.word:
words.append(
dict(
word=timing.word,
start=round(time_offset + timing.start, 2),
end=round(time_offset + timing.end, 2),
probability=timing.probability,
)
)
saved_tokens += len(timing.tokens)
word_index += 1
# hack: truncate long words at segment boundaries.
# a better segmentation algorithm based on VAD should be able to replace this.
if len(words) > 0:
# ensure the first and second word after a pause is not longer than
# twice the median word duration.
if words[0]["end"] - last_speech_timestamp > median_duration * 4 and (
words[0]["end"] - words[0]["start"] > max_duration
or (
len(words) > 1
and words[1]["end"] - words[0]["start"] > max_duration * 2
)
):
if (
len(words) > 1
and words[1]["end"] - words[1]["start"] > max_duration
):
boundary = max(words[1]["end"] / 2, words[1]["end"] - max_duration)
words[0]["end"] = words[1]["start"] = boundary
words[0]["start"] = max(0, words[0]["end"] - max_duration)
# prefer the segment-level start timestamp if the first word is too long.
if (
segment["start"] < words[0]["end"]
and segment["start"] - 0.5 > words[0]["start"]
):
words[0]["start"] = max(
0, min(words[0]["end"] - median_duration, segment["start"])
)
else:
segment["start"] = words[0]["start"]
# prefer the segment-level end timestamp if the last word is too long.
if (
segment["end"] > words[-1]["start"]
and segment["end"] + 0.5 < words[-1]["end"]
):
words[-1]["end"] = max(
words[-1]["start"] + median_duration, segment["end"]
)
else:
segment["end"] = words[-1]["end"]
last_speech_timestamp = segment["end"]
segment["words"] = words
# Path: whisper/tokenizer.py
LANGUAGES = {
"en": "english",
"zh": "chinese",
"de": "german",
"es": "spanish",
"ru": "russian",
"ko": "korean",
"fr": "french",
"ja": "japanese",
"pt": "portuguese",
"tr": "turkish",
"pl": "polish",
"ca": "catalan",
"nl": "dutch",
"ar": "arabic",
"sv": "swedish",
"it": "italian",
"id": "indonesian",
"hi": "hindi",
"fi": "finnish",
"vi": "vietnamese",
"he": "hebrew",
"uk": "ukrainian",
"el": "greek",
"ms": "malay",
"cs": "czech",
"ro": "romanian",
"da": "danish",
"hu": "hungarian",
"ta": "tamil",
"no": "norwegian",
"th": "thai",
"ur": "urdu",
"hr": "croatian",
"bg": "bulgarian",
"lt": "lithuanian",
"la": "latin",
"mi": "maori",
"ml": "malayalam",
"cy": "welsh",
"sk": "slovak",
"te": "telugu",
"fa": "persian",
"lv": "latvian",
"bn": "bengali",
"sr": "serbian",
"az": "azerbaijani",
"sl": "slovenian",
"kn": "kannada",
"et": "estonian",
"mk": "macedonian",
"br": "breton",
"eu": "basque",
"is": "icelandic",
"hy": "armenian",
"ne": "nepali",
"mn": "mongolian",
"bs": "bosnian",
"kk": "kazakh",
"sq": "albanian",
"sw": "swahili",
"gl": "galician",
"mr": "marathi",
"pa": "punjabi",
"si": "sinhala",
"km": "khmer",
"sn": "shona",
"yo": "yoruba",
"so": "somali",
"af": "afrikaans",
"oc": "occitan",
"ka": "georgian",
"be": "belarusian",
"tg": "tajik",
"sd": "sindhi",
"gu": "gujarati",
"am": "amharic",
"yi": "yiddish",
"lo": "lao",
"uz": "uzbek",
"fo": "faroese",
"ht": "haitian creole",
"ps": "pashto",
"tk": "turkmen",
"nn": "nynorsk",
"mt": "maltese",
"sa": "sanskrit",
"lb": "luxembourgish",
"my": "myanmar",
"bo": "tibetan",
"tl": "tagalog",
"mg": "malagasy",
"as": "assamese",
"tt": "tatar",
"haw": "hawaiian",
"ln": "lingala",
"ha": "hausa",
"ba": "bashkir",
"jw": "javanese",
"su": "sundanese",
"yue": "cantonese",
}
# Path: whisper/tokenizer.py
@lru_cache(maxsize=None)
def get_tokenizer(
multilingual: bool,
*,
num_languages: int = 99,
language: Optional[str] = None,
task: Optional[str] = None, # Literal["transcribe", "translate", None]
) -> Tokenizer:
if language is not None:
language = language.lower()
if language not in LANGUAGES:
if language in TO_LANGUAGE_CODE:
language = TO_LANGUAGE_CODE[language]
else:
raise ValueError(f"Unsupported language: {language}")
if multilingual:
encoding_name = "multilingual"
language = language or "en"
task = task or "transcribe"
else:
encoding_name = "gpt2"
language = None
task = None
encoding = get_encoding(name=encoding_name, num_languages=num_languages)
return Tokenizer(
encoding=encoding, num_languages=num_languages, language=language, task=task
)
# Path: whisper/transcribe.py
import sys
import warnings
import mlx.core as mx
import numpy as np
import tqdm
from typing import List, Optional, Tuple, Union
from .audio import (
FRAMES_PER_SECOND,
HOP_LENGTH,
N_FRAMES,
N_SAMPLES,
SAMPLE_RATE,
log_mel_spectrogram,
pad_or_trim,
)
from .decoding import DecodingOptions, DecodingResult
from .load_models import load_model
from .timing import add_word_timestamps
from .tokenizer import LANGUAGES, get_tokenizer
# Copyright © 2023 Apple Inc.
def _format_timestamp(seconds: float):
assert seconds >= 0, "non-negative timestamp expected"
milliseconds = round(seconds * 1000.0)
hours = milliseconds // 3_600_000
milliseconds -= hours * 3_600_000
minutes = milliseconds // 60_000
| milliseconds -= minutes * 60_000 |
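The function above is cut off at this point; the remaining arithmetic presumably continues peeling off seconds and milliseconds. A self-contained sketch of the full conversion (the exact formatting in whisper/transcribe.py may differ, e.g. in how hours are included):

def format_timestamp_sketch(seconds: float) -> str:
    assert seconds >= 0, "non-negative timestamp expected"
    milliseconds = round(seconds * 1000.0)
    hours, milliseconds = divmod(milliseconds, 3_600_000)
    minutes, milliseconds = divmod(milliseconds, 60_000)
    secs, milliseconds = divmod(milliseconds, 1_000)
    return f"{hours:02d}:{minutes:02d}:{secs:02d}.{milliseconds:03d}"

print(format_timestamp_sketch(3661.5))  # 01:01:01.500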
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: SqueezeAILab/LLMCompiler
# Path: src/agents/tools.py
class InvalidTool(BaseTool):
def _run(
self,
requested_tool_name: str,
available_tool_names: List[str],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
async def _arun(
self,
requested_tool_name: str,
available_tool_names: List[str],
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
# Path: src/chains/llm_math_chain.py
class LLMMathChain(Chain):
"""Chain that interprets a prompt and executes python code to do math.
Example:
.. code-block:: python
from langchain import LLMMathChain, OpenAI
llm_math = LLMMathChain.from_llm(OpenAI())
"""
llm_chain: LLMChain
llm: Optional[BaseLanguageModel] = None
"""[Deprecated] LLM wrapper to use."""
prompt: BasePromptTemplate = PROMPT
"""[Deprecated] Prompt to use to translate to python if necessary."""
input_key: str = "question" #: :meta private:
output_key: str = "answer" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def raise_deprecation(cls, values: Dict) -> Dict:
if "llm" in values:
warnings.warn(
"Directly instantiating an LLMMathChain with an llm is deprecated. "
"Please instantiate with llm_chain argument or using the from_llm "
"class method."
)
if "llm_chain" not in values and values["llm"] is not None:
prompt = values.get("prompt", PROMPT)
values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt)
return values
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Expect output key.
:meta private:
"""
return [self.output_key]
def _evaluate_expression(self, expression: str) -> str:
try:
expression = expression.strip()
# Remove any commas in between digits from the expression
# e.g. "1,000,000, 2,000,000" -> "1000000, 2000000"
expression = re.sub(r"(?<=\d),(?=\d)", "", expression)
local_dict = {"pi": math.pi, "e": math.e}
# Handle min and max functions
expression = replace_min_max_functions(expression)
output = str(
numexpr.evaluate(
expression,
global_dict={}, # restrict access to globals
local_dict=local_dict, # add common mathematical functions
)
)
except Exception as e:
print(
f'LLMMathChain._evaluate("{expression}") raised error: {e}.'
" Please try again with a valid numerical expression"
)
return "error at math chain"
# Remove any leading and trailing brackets from the output
return re.sub(r"^\[|\]$", "", output)
def _process_llm_result(
self, llm_output: str, run_manager: CallbackManagerForChainRun
) -> Dict[str, str]:
run_manager.on_text(llm_output, color="green", verbose=self.verbose)
llm_output = llm_output.strip()
text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
if text_match:
expression = text_match.group(1)
output = self._evaluate_expression(expression)
run_manager.on_text("\nAnswer: ", verbose=self.verbose)
run_manager.on_text(output, color="yellow", verbose=self.verbose)
answer = "Answer: " + output
elif llm_output.startswith("Answer:"):
answer = llm_output
elif "Answer:" in llm_output:
answer = "Answer: " + llm_output.split("Answer:")[-1]
else:
return {self.output_key: "Answer: error at math chain"}
# raise ValueError(f"unknown format from LLM: {llm_output}")
return {self.output_key: answer}
async def _aprocess_llm_result(
self,
llm_output: str,
run_manager: AsyncCallbackManagerForChainRun,
) -> Dict[str, str]:
await run_manager.on_text(llm_output, color="green", verbose=self.verbose)
llm_output = llm_output.strip()
text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
if text_match:
expression = text_match.group(1)
output = self._evaluate_expression(expression)
await run_manager.on_text("\nAnswer: ", verbose=self.verbose)
await run_manager.on_text(output, color="yellow", verbose=self.verbose)
answer = "Answer: " + output
elif llm_output.startswith("Answer:"):
answer = llm_output
elif "Answer:" in llm_output:
answer = "Answer: " + llm_output.split("Answer:")[-1]
else:
raise ValueError(f"unknown format from LLM: {llm_output}")
return {self.output_key: answer}
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
_run_manager.on_text(inputs[self.input_key])
llm_output = self.llm_chain.predict(
question=inputs[self.input_key],
stop=["```output"],
callbacks=_run_manager.get_child(),
)
return self._process_llm_result(llm_output, _run_manager)
async def _acall(
self,
inputs: Dict[str, str],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
await _run_manager.on_text(inputs[self.input_key])
llm_output = await self.llm_chain.apredict(
question=inputs[self.input_key],
stop=["```output"],
callbacks=_run_manager.get_child(),
)
return await self._aprocess_llm_result(llm_output, _run_manager)
@property
def _chain_type(self) -> str:
return "llm_math_chain"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: BasePromptTemplate = PROMPT,
**kwargs: Any,
) -> LLMMathChain:
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(llm_chain=llm_chain, **kwargs)
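For reference, the expression-evaluation step in `_evaluate_expression` above reduces to a small `numexpr` call. A minimal, self-contained sketch (illustrative only, not taken from the repository; assumes `numexpr` is installed):
```python
import math
import re

import numexpr

def evaluate(expression: str) -> str:
    # Strip commas between digits ("1,000,000" -> "1000000"), as the chain does above.
    expression = re.sub(r"(?<=\d),(?=\d)", "", expression.strip())
    result = numexpr.evaluate(
        expression,
        global_dict={},                           # restrict access to globals
        local_dict={"pi": math.pi, "e": math.e},  # common math constants
    )
    # Drop any leading/trailing brackets from the stringified result.
    return re.sub(r"^\[|\]$", "", str(result))

print(evaluate("2 * pi * 1,000"))  # ~6283.19
```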
# Path: src/docstore/wikipedia.py
class DocstoreExplorer:
"""Class to assist with exploration of a document store."""
def __init__(self, docstore: ReActWikipedia, char_limit=None, one_sentence=False):
"""Initialize with a docstore, and set initial document to None."""
self.docstore = docstore
self.document: Optional[Document] = None
self.lookup_str = ""
self.lookup_index = 0
self.char_limit = char_limit
self.one_sentence = one_sentence
def search(self, term: str) -> str:
"""Search for a term in the docstore, and if found save."""
result = self.docstore.search(term)
if self.one_sentence:
result = result.split(". ")[0]
if self.char_limit is not None:
result = result[: self.char_limit]
if isinstance(result, Document):
self.document = result
return self._summary
else:
self.document = None
return result
async def asearch(self, term: str) -> str:
"""Search for a term in the docstore, and if found save."""
result = await self.docstore.asearch(term)
if self.one_sentence:
result = result.split(". ")[0]
if self.char_limit is not None:
result = result[: self.char_limit]
if isinstance(result, Document):
self.document = result
return self._summary
else:
self.document = None
return result
def lookup(self, term: str) -> str:
"""Lookup a term in document (if saved)."""
if self.document is None:
raise ValueError("Cannot lookup without a successful search first")
if term.lower() != self.lookup_str:
self.lookup_str = term.lower()
self.lookup_index = 0
else:
self.lookup_index += 1
lookups = [p for p in self._paragraphs if self.lookup_str in p.lower()]
if len(lookups) == 0:
return "No Results"
elif self.lookup_index >= len(lookups):
return "No More Results"
else:
result_prefix = f"(Result {self.lookup_index + 1}/{len(lookups)})"
return f"{result_prefix} {lookups[self.lookup_index]}"
@property
def _summary(self) -> str:
return self._paragraphs[0]
@property
def _paragraphs(self) -> List[str]:
if self.document is None:
raise ValueError("Cannot get paragraphs without a document")
return self.document.page_content.split("\n\n")
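A short usage sketch for the explorer above (illustrative only, not repository code; assumes `requests` and `beautifulsoup4` are installed and makes a live network call). Note that `lookup()` only applies when the docstore returns `Document` objects; `ReActWikipedia.search()` below returns plain text, so only `search()` is exercised here:
```python
from src.docstore.wikipedia import DocstoreExplorer, ReActWikipedia

explorer = DocstoreExplorer(ReActWikipedia(), char_limit=2000, one_sentence=False)
observation = explorer.search("Alan Turing")  # page summary, or "Could not find ..." plus suggestions
print(observation)
```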
# Path: src/docstore/wikipedia.py
class ReActWikipedia(Docstore):
"""Wrapper around wikipedia API."""
def __init__(self, benchmark=False, skip_retry_when_postprocess=False) -> None:
"""Check that wikipedia package is installed."""
try:
import requests
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
"Could not import wikipedia python package. "
"Please install it with `pip install wikipedia`."
)
self.page = None
self.lookup_keyword = None
self.lookup_list = None
self.lookup_cnt = None
self.benchmark = benchmark
self.all_times = []
# when True, always skip retry when postprocess
self.skip_retry_when_postprocess = skip_retry_when_postprocess
def reset(self):
self.all_times = []
def get_stats(self):
return {
"all_times": self.all_times,
}
@staticmethod
def _get_page_obs(page):
# find all paragraphs
paragraphs = page.split("\n")
paragraphs = [p.strip() for p in paragraphs if p.strip()]
# find all sentence
sentences = []
for p in paragraphs:
sentences += p.split(". ")
sentences = [s.strip() + "." for s in sentences if s.strip()]
return " ".join(sentences[:5])
def _get_alternative(self, result: str) -> str:
parsed_alternatives = result.split("Similar: ")[1][:-1]
alternatives = ast.literal_eval(parsed_alternatives)
alternative = alternatives[0]
for alt in alternatives:
if "film" in alt or "movie" in alt:
alternative = alt
break
return alternative
def post_process(
self, response_text: str, entity: str, skip_retry_when_postprocess: bool = False
) -> str:
soup = BeautifulSoup(response_text, features="html.parser")
result_divs = soup.find_all("div", {"class": "mw-search-result-heading"})
if result_divs: # mismatch
self.result_titles = [
clean_str(div.get_text().strip()) for div in result_divs
]
obs = f"Could not find {entity}. Similar: {self.result_titles[:5]}."
else:
page = [
p.get_text().strip() for p in soup.find_all("p") + soup.find_all("ul")
]
if any("may refer to:" in p for p in page):
if skip_retry_when_postprocess or self.skip_retry_when_postprocess:
obs = "Could not find " + entity + "."
else:
obs = self.search("[" + entity + "]", is_retry=True)
else:
self.page = ""
for p in page:
if len(p.split(" ")) > 2:
self.page += clean_str(p)
if not p.endswith("\n"):
self.page += "\n"
obs = self._get_page_obs(self.page)
self.lookup_keyword = self.lookup_list = self.lookup_cnt = None
obs = obs.replace("\\n", "")
return obs
async def apost_process(
self, response_text: str, entity: str, skip_retry_when_postprocess: bool = False
) -> str:
soup = BeautifulSoup(response_text, features="html.parser")
result_divs = soup.find_all("div", {"class": "mw-search-result-heading"})
if result_divs: # mismatch
self.result_titles = [
clean_str(div.get_text().strip()) for div in result_divs
]
obs = f"Could not find {entity}. Similar: {self.result_titles[:5]}."
else:
page = [
p.get_text().strip() for p in soup.find_all("p") + soup.find_all("ul")
]
if any("may refer to:" in p for p in page):
if skip_retry_when_postprocess or self.skip_retry_when_postprocess:
obs = "Could not find " + entity + "."
else:
obs = await self.asearch("[" + entity + "]", is_retry=True)
else:
self.page = ""
for p in page:
if len(p.split(" ")) > 2:
self.page += clean_str(p)
if not p.endswith("\n"):
self.page += "\n"
obs = self._get_page_obs(self.page)
self.lookup_keyword = self.lookup_list = self.lookup_cnt = None
obs = obs.replace("\\n", "")
return obs
def search(self, entity: str, is_retry: bool = False) -> Union[str, Document]:
"""Try to search for wiki page.
If page exists, return the page summary, and a PageWithLookups object.
If page does not exist, return similar entries.
Args:
entity: entity string.
Returns: a Document object or error message.
"""
s = time.time()
entity = str(entity)
entity_ = entity.replace(" ", "+")
search_url = f"https://en.wikipedia.org/w/index.php?search={entity_}"
response_text = requests.get(search_url).text
result = self.post_process(response_text, entity)
if "Similar:" in result:
alternative = self._get_alternative(result)
entity_ = alternative.replace(" ", "+")
search_url = f"https://en.wikipedia.org/w/index.php?search={entity_}"
response_text = requests.get(search_url).text
result = self.post_process(
response_text, entity, skip_retry_when_postprocess=True
)
if "Similar:" in result:
result = "Could not find " + entity + "."
if self.benchmark and not is_retry:
# we only benchmark the outermost call
self.all_times.append(round(time.time() - s, 2))
return result
async def asearch(
self, entity: str, is_retry: bool = False
) -> Union[str, Document]:
"""Try to search for wiki page.
If page exists, return the page summary, and a PageWithLookups object.
If page does not exist, return similar entries.
Args:
entity: entity string.
Returns: a Document object or error message.
"""
s = time.time()
entity = str(entity)
entity_ = entity.replace(" ", "+")
search_url = f"https://en.wikipedia.org/w/index.php?search={entity_}"
async with aiohttp.ClientSession() as session:
async with session.get(search_url) as response:
response_text = await response.text()
result = await self.apost_process(response_text, entity)
if "Similar:" in result:
alternative = self._get_alternative(result)
entity_ = alternative.replace(" ", "+")
search_url = f"https://en.wikipedia.org/w/index.php?search={entity_}"
async with aiohttp.ClientSession() as session:
async with session.get(search_url) as response:
response_text = await response.text()
result = await self.apost_process(
response_text, entity, skip_retry_when_postprocess=True
)
if "Similar:" in result:
return "Could not find " + entity + "."
if self.benchmark and not is_retry:
# we only benchmark the outermost call
self.all_times.append(round(time.time() - s, 2))
return result
# Path: configs/parallelqa/tools.py
from langchain.chat_models import ChatOpenAI
from src.agents.tools import Tool
from src.chains.llm_math_chain import LLMMathChain
from src.docstore.wikipedia import DocstoreExplorer, ReActWikipedia
_MATH_DESCRIPTION = (
"math(problem: str, context: Optional[list[str]]) -> float:\n"
" - Solves the provided math problem.\n"
' - `problem` can be either a simple math problem (e.g. "1 + 3") or a word problem (e.g. "how many apples are there if there are 3 apples and 2 apples").\n'
" - You cannot calculate multiple expressions in one call. For instance, `math('1 + 3, 2 + 4')` does not work. "
"If you need to calculate multiple expressions, you need to call them separately like `math('1 + 3')` and then `math('2 + 4')`\n"
" - Minimize the number of `math` actions as much as possible. For instance, instead of calling "
'2. math("what is the 10% of $1") and then call 3. math("$1 + $2"), '
'you MUST call 2. math("what is the 110% of $1") instead, which will reduce the number of math actions.\n'
# Context specific rules below
" - You can optionally provide a list of strings as `context` to help the agent solve the problem. "
"If there are multiple contexts you need to answer the question, you can provide them as a list of strings.\n"
" - `math` action will not see the output of the previous actions unless you provide it as `context`. "
"You MUST provide the output of the previous actions as `context` if you need to do math on it.\n"
" - You MUST NEVER provide `search` action's outputs as a variable in the `problem` argument. "
"This is because `search` returns a text blob that contains the information about the entity, not a number or value. "
"Therefore, when you need to provide an output of `search` action, you MUST provide it as a `context` argument to `math` action. "
'For example, 1. search("Barack Obama") and then 2. math("age of $1") is NEVER allowed. '
'Use 2. math("age of Barack Obama, context=["$1"]) instead.\n'
" - When you ask a question about `context`, specify the units. "
'For instance, "what is xx in height?" or "what is xx in millions?" instead of "what is xx?"\n'
)
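To make the calling convention described above concrete, a hypothetical plan fragment (illustrative strings only, not repository code):
```python
allowed = [
    '1. search("Barack Obama")',
    '2. math("age of Barack Obama in years", context=["$1"])',  # pass the search blob as context
]
not_allowed = [
    '2. math("age of $1")',      # $1 is a text blob from search, not a number
    '3. math("1 + 3, 2 + 4")',   # only one expression per math() call
]
print(allowed, not_allowed)
```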
def run_llm_math_chain_factory(llm_math_chain):
async def run_llm_math_chain(question, context=None):
if context is None:
| prompt = question |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: open-compass/MixtralKit
# Path: mixtralkit/layers/utils.py
class ModelArgs:
dim: int = 4096
n_layers: int = 32
n_heads: int = 32
n_kv_heads: Optional[int] = None
vocab_size: int = -1 # defined later by tokenizer
multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2
ffn_dim_multiplier: Optional[float] = None
norm_eps: float = 1e-5
max_batch_size: int = 32
max_seq_len: int = 2048
# Path: mixtralkit/layers/attention.py
class TorchAttention(nn.Module):
"""Multi-head attention module."""
def __init__(self, args: ModelArgs):
"""
Initialize the Attention module.
Args:
args (ModelArgs): Model configuration parameters.
Attributes:
n_kv_heads (int): Number of key and value heads.
n_local_heads (int): Number of local query heads.
n_local_kv_heads (int): Number of local key and value heads.
n_rep (int): Number of repetitions for local heads.
head_dim (int): Dimension size of each attention head.
wq (ColumnParallelLinear): Linear transformation for queries.
wk (ColumnParallelLinear): Linear transformation for keys.
wv (ColumnParallelLinear): Linear transformation for values.
wo (RowParallelLinear): Linear transformation for output.
cache_k (torch.Tensor): Cached keys for attention.
cache_v (torch.Tensor): Cached values for attention.
"""
super().__init__()
self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
model_parallel_size = 1
self.n_local_heads = args.n_heads // model_parallel_size
self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
self.n_rep = self.n_local_heads // self.n_local_kv_heads
self.head_dim = args.dim // args.n_heads
self.wq = nn.Linear(
args.dim,
args.n_heads * self.head_dim,
bias=False,
)
self.wk = nn.Linear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
)
self.wv = nn.Linear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
)
self.wo = nn.Linear(
args.n_heads * self.head_dim,
args.dim,
bias=False,
)
self.cache_k = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
self.cache_v = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
def forward(
self,
x: torch.Tensor,
start_pos: int,
freqs_cis: torch.Tensor,
mask: Optional[torch.Tensor],
):
"""
Forward pass of the attention module.
Args:
x (torch.Tensor): Input tensor.
start_pos (int): Starting position for caching.
freqs_cis (torch.Tensor): Precomputed frequency tensor.
mask (torch.Tensor, optional): Attention mask tensor.
Returns:
torch.Tensor: Output tensor after attention.
"""
bsz, seqlen, _ = x.shape
xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
self.cache_k = self.cache_k.to(xq)
self.cache_v = self.cache_v.to(xq)
self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk
self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv
keys = self.cache_k[:bsz, : start_pos + seqlen]
values = self.cache_v[:bsz, : start_pos + seqlen]
# repeat k/v heads if n_kv_heads < n_heads
keys = repeat_kv(keys, self.n_rep) # (bs, cache_len + seqlen, n_local_heads, head_dim)
values = repeat_kv(values, self.n_rep) # (bs, cache_len + seqlen, n_local_heads, head_dim)
xq = xq.transpose(1, 2) # (bs, n_local_heads, seqlen, head_dim)
keys = keys.transpose(1, 2) # (bs, n_local_heads, cache_len + seqlen, head_dim)
values = values.transpose(1, 2) # (bs, n_local_heads, cache_len + seqlen, head_dim)
scores = torch.matmul(xq, keys.transpose(2, 3)) / math.sqrt(self.head_dim)
if mask is not None:
scores = scores + mask # (bs, n_local_heads, seqlen, cache_len + seqlen)
scores = F.softmax(scores.float(), dim=-1).type_as(xq)
output = torch.matmul(scores, values) # (bs, n_local_heads, seqlen, head_dim)
output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1)
return self.wo(output)
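The grouped-query bookkeeping above boils down to a few integer ratios. A worked example with the default `ModelArgs` values (dim=4096, n_heads=32) and an illustrative n_kv_heads=8 (a value chosen here for illustration, not asserted by the repository):
```python
dim, n_heads, n_kv_heads, model_parallel_size = 4096, 32, 8, 1

head_dim = dim // n_heads                             # 128
n_local_heads = n_heads // model_parallel_size        # 32
n_local_kv_heads = n_kv_heads // model_parallel_size  # 8
n_rep = n_local_heads // n_local_kv_heads             # 4: each KV head is shared by 4 query heads

print(head_dim, n_rep)
```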
# Path: mixtralkit/layers/attention.py
class FairScaleAttention(TorchAttention):
"""Multi-head attention module.
Modified from
"""
def __init__(self, args: ModelArgs):
"""
Initialize the Attention module.
Args:
args (ModelArgs): Model configuration parameters.
Attributes:
n_kv_heads (int): Number of key and value heads.
n_local_heads (int): Number of local query heads.
n_local_kv_heads (int): Number of local key and value heads.
n_rep (int): Number of repetitions for local heads.
head_dim (int): Dimension size of each attention head.
wq (ColumnParallelLinear): Linear transformation for queries.
wk (ColumnParallelLinear): Linear transformation for keys.
wv (ColumnParallelLinear): Linear transformation for values.
wo (RowParallelLinear): Linear transformation for output.
cache_k (torch.Tensor): Cached keys for attention.
cache_v (torch.Tensor): Cached values for attention.
"""
import fairscale.nn.model_parallel.initialize as fs_init
from fairscale.nn.model_parallel.layers import (
ColumnParallelLinear,
RowParallelLinear,
)
super().__init__(args)
self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
model_parallel_size = fs_init.get_model_parallel_world_size()
self.n_local_heads = args.n_heads // model_parallel_size
self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
self.n_rep = self.n_local_heads // self.n_local_kv_heads
self.head_dim = args.dim // args.n_heads
self.wq = ColumnParallelLinear(
args.dim,
args.n_heads * self.head_dim,
bias=False,
gather_output=False,
init_method=lambda x: x,
)
self.wk = ColumnParallelLinear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
gather_output=False,
init_method=lambda x: x,
)
self.wv = ColumnParallelLinear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
gather_output=False,
init_method=lambda x: x,
)
self.wo = RowParallelLinear(
args.n_heads * self.head_dim,
args.dim,
bias=False,
input_is_parallel=True,
init_method=lambda x: x,
)
self.cache_k = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
self.cache_v = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
# Path: mixtralkit/layers/ffn.py
class TorchFFN(nn.Module):
def __init__(
self,
dim: int,
hidden_dim: int,
):
"""
Initialize the FeedForward module.
Args:
dim (int): Input dimension.
hidden_dim (int): Hidden dimension of the feedforward layer.
multiple_of (int): Value to ensure hidden dimension is a multiple of this value.
ffn_dim_multiplier (float, optional): Custom multiplier for hidden dimension. Defaults to None.
Attributes:
w1 (ColumnParallelLinear): Linear transformation for the first layer.
w2 (RowParallelLinear): Linear transformation for the second layer.
w3 (ColumnParallelLinear): Linear transformation for the third layer.
"""
super().__init__()
self.w1 = nn.Linear(
dim, hidden_dim, bias=False
)
self.w2 = nn.Linear(
hidden_dim, dim, bias=False
)
self.w3 = nn.Linear(
dim, hidden_dim, bias=False
)
def forward(self, x):
device = x.device
x = x.to(self.w1.weight.device)
return self.w2(F.silu(self.w1(x)) * self.w3(x)).to(device)
# Path: mixtralkit/layers/ffn.py
class FairScaleFFN(nn.Module):
def __init__(
self,
dim: int,
hidden_dim: int,
multiple_of: int,
ffn_dim_multiplier: Optional[float],
):
"""
Initialize the FeedForward module.
Args:
dim (int): Input dimension.
hidden_dim (int): Hidden dimension of the feedforward layer.
multiple_of (int): Value to ensure hidden dimension is a multiple of this value.
ffn_dim_multiplier (float, optional): Custom multiplier for hidden dimension. Defaults to None.
Attributes:
w1 (ColumnParallelLinear): Linear transformation for the first layer.
w2 (RowParallelLinear): Linear transformation for the second layer.
w3 (ColumnParallelLinear): Linear transformation for the third layer.
"""
super().__init__()
hidden_dim = int(2 * hidden_dim / 3)
# custom dim factor multiplier
if ffn_dim_multiplier is not None:
hidden_dim = int(ffn_dim_multiplier * hidden_dim)
hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
from fairscale.nn.model_parallel.layers import (
ColumnParallelLinear,
RowParallelLinear,
)
self.w1 = ColumnParallelLinear(
dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x
)
self.w2 = RowParallelLinear(
hidden_dim, dim, bias=False, input_is_parallel=True, init_method=lambda x: x
)
self.w3 = ColumnParallelLinear(
dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x
)
def forward(self, x):
return self.w2(F.silu(self.w1(x)) * self.w3(x))
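The hidden-dimension rounding in `FairScaleFFN.__init__` above, worked through for dim=4096 with hidden_dim=4*dim (as passed by the transformer block) and multiple_of=256, matching the `ModelArgs` defaults (illustrative arithmetic only):
```python
dim, multiple_of = 4096, 256

hidden_dim = 4 * dim                  # 16384, the value handed in by the block
hidden_dim = int(2 * hidden_dim / 3)  # 10922
hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
print(hidden_dim)                     # 11008
```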
# Path: mixtralkit/layers/transformer.py
class TorchTransformerBlock(nn.Module):
def __init__(self, layer_id: int, args: ModelArgs):
"""
Initialize a TransformerBlock.
Args:
layer_id (int): Identifier for the layer.
args (ModelArgs): Model configuration parameters.
Attributes:
n_heads (int): Number of attention heads.
dim (int): Dimension size of the model.
head_dim (int): Dimension size of each attention head.
attention (Attention): Attention module.
feed_forward (FeedForward): FeedForward module.
layer_id (int): Identifier for the layer.
attention_norm (RMSNorm): Layer normalization for attention output.
ffn_norm (RMSNorm): Layer normalization for feedforward output.
"""
super().__init__()
self.n_heads = args.n_heads
self.dim = args.dim
self.head_dim = args.dim // args.n_heads
self.attention = TorchAttention(args)
self.feed_forward = TorchFFN(
dim=args.dim,
hidden_dim=4 * args.dim,
)
self.layer_id = layer_id
self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps)
self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps)
def forward(
self,
x: torch.Tensor,
start_pos: int,
freqs_cis: torch.Tensor,
mask: Optional[torch.Tensor],
):
"""
Perform a forward pass through the TransformerBlock.
Args:
x (torch.Tensor): Input tensor.
start_pos (int): Starting position for attention caching.
freqs_cis (torch.Tensor): Precomputed cosine and sine frequencies.
mask (torch.Tensor, optional): Masking tensor for attention. Defaults to None.
Returns:
torch.Tensor: Output tensor after applying attention and feedforward layers.
"""
h = x + self.attention.forward(
self.attention_norm(x), start_pos, freqs_cis, mask
)
out = h + self.feed_forward.forward(self.ffn_norm(h))
return out
# Path: mixtralkit/layers/transformer.py
class TorchTransformer(nn.Module):
def __init__(self, params: ModelArgs):
"""
Initialize a Transformer model.
Args:
params (ModelArgs): Model configuration parameters.
Attributes:
params (ModelArgs): Model configuration parameters.
vocab_size (int): Vocabulary size.
n_layers (int): Number of layers in the model.
tok_embeddings (ParallelEmbedding): Token embeddings.
layers (torch.nn.ModuleList): List of Transformer blocks.
norm (RMSNorm): Layer normalization for the model output.
output (ColumnParallelLinear): Linear layer for final output.
freqs_cis (torch.Tensor): Precomputed cosine and sine frequencies.
"""
super().__init__()
self.params = params
self.vocab_size = params.vocab_size
self.n_layers = params.n_layers
self.tok_embeddings = nn.Embedding(
params.vocab_size, params.dim
)
self.layers = torch.nn.ModuleList()
for layer_id in range(params.n_layers):
self.layers.append(TorchTransformerBlock(layer_id, params))
self.norm = RMSNorm(params.dim, eps=params.norm_eps)
self.output = nn.Linear(
params.dim, params.vocab_size, bias=False
)
self.freqs_cis = precompute_freqs_cis(
# Note that self.params.max_seq_len is multiplied by 2 because the token limit for the Llama 2 generation of models is 4096.
# Adding this multiplier instead of using 4096 directly allows for dynamism of token lengths while training or fine-tuning.
dim=self.params.dim // self.params.n_heads,
end=self.params.max_seq_len * 2,
theta=self.params.rope_theta,
)
@torch.inference_mode()
def forward(self, tokens: torch.Tensor, start_pos: int):
"""
Perform a forward pass through the Transformer model.
Args:
tokens (torch.Tensor): Input token indices.
start_pos (int): Starting position for attention caching.
Returns:
torch.Tensor: Output logits after applying the Transformer model.
"""
_bsz, seqlen = tokens.shape
h = self.tok_embeddings(tokens)
self.freqs_cis = self.freqs_cis.to(h.device)
freqs_cis = self.freqs_cis[start_pos : start_pos + seqlen]
mask = None
if seqlen > 1:
mask = torch.full(
(seqlen, seqlen), float("-inf"), device=tokens.device
)
mask = torch.triu(mask, diagonal=1)
# When performing key-value caching, we compute the attention scores
# only for the new sequence. Thus, the matrix of scores is of size
# (seqlen, cache_len + seqlen), and the only masked entries are (i, j) for
# j > cache_len + i, since row i corresponds to token cache_len + i.
mask = torch.hstack([
torch.zeros((seqlen, start_pos), device=tokens.device),
mask
]).type_as(h)
for layer in self.layers:
h = layer(h, start_pos, freqs_cis, mask)
h = self.norm(h)
output = self.output(h).float()
return output
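A small sketch of the mask construction above during cached decoding (illustrative only): with start_pos=4 tokens already cached and seqlen=3 new tokens, the mask is (3, 7) -- zeros over the cached positions and an upper-triangular -inf block over the new ones.
```python
import torch

start_pos, seqlen = 4, 3
mask = torch.full((seqlen, seqlen), float("-inf"))
mask = torch.triu(mask, diagonal=1)                            # causal mask over the new tokens
mask = torch.hstack([torch.zeros((seqlen, start_pos)), mask])  # cached positions stay fully visible
print(mask.shape)  # torch.Size([3, 7])
```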
# Path: mixtralkit/layers/transformer.py
class FairScaleTransformer(TorchTransformer):
def __init__(self, params: ModelArgs):
"""
Initialize a Transformer model.
Args:
params (ModelArgs): Model configuration parameters.
Attributes:
params (ModelArgs): Model configuration parameters.
vocab_size (int): Vocabulary size.
n_layers (int): Number of layers in the model.
tok_embeddings (ParallelEmbedding): Token embeddings.
layers (torch.nn.ModuleList): List of Transformer blocks.
norm (RMSNorm): Layer normalization for the model output.
output (ColumnParallelLinear): Linear layer for final output.
freqs_cis (torch.Tensor): Precomputed cosine and sine frequencies.
"""
super().__init__()
self.params = params
self.vocab_size = params.vocab_size
self.n_layers = params.n_layers
from fairscale.nn.model_parallel.layers import (
ColumnParallelLinear,
ParallelEmbedding,
)
self.tok_embeddings = ParallelEmbedding(
params.vocab_size, params.dim, init_method=lambda x: x
)
self.layers = torch.nn.ModuleList()
for layer_id in range(params.n_layers):
self.layers.append(FairScaleTransformerBlock(layer_id, params))
self.norm = RMSNorm(params.dim, eps=params.norm_eps)
self.output = ColumnParallelLinear(
params.dim, params.vocab_size, bias=False, init_method=lambda x: x
)
self.freqs_cis = precompute_freqs_cis(
# Note that self.params.max_seq_len is multiplied by 2 because the token limit for the Llama 2 generation of models is 4096.
# Adding this multiplier instead of using 4096 directly allows for dynamism of token lengths while training or fine-tuning.
dim=self.params.dim // self.params.n_heads,
end=self.params.max_seq_len * 2,
theta=self.params.rope_theta,
)
# Path: mixtralkit/layers/moe.py
import math
import torch
import torch.nn.functional as F
from dataclasses import dataclass
from typing import Dict, Optional, Tuple
from torch import nn
from .utils import ModelArgs
from .attention import TorchAttention, FairScaleAttention
from .ffn import TorchFFN, FairScaleFFN
from .transformer import TorchTransformerBlock, TorchTransformer, FairScaleTransformer
from fairscale.nn.model_parallel.layers import (
ColumnParallelLinear,
)
# Copyright (c) OpenMMLab. and affiliates.
# Copyright (c) Meta Platforms, Inc. and affiliates.
class MoETorchFFN(nn.Module):
def __init__(
self,
num_experts: int,
num_experts_per_tok: int,
num_shards: int,
gate_softmax: bool = False,
**kwargs,
):
super().__init__()
self.experts = nn.ModuleList([
TorchFFN(**kwargs).to(f"cuda:{i//num_shards}")
for i in range(num_experts)]
)
self.gate = nn.Linear(
kwargs["dim"], num_experts, bias=False)
self.num_experts_per_tok = num_experts_per_tok
self.gate_softmax = gate_softmax
print("Softmax for Gate:{}".format(str(gate_softmax)))
def forward(self, x):
orig_shape = x.shape
x = x.view(-1, x.shape[-1])
if self.gate_softmax:
scores = self.gate(x).softmax(dim=-1)
else:
scores = self.gate(x)
expert_weights, expert_indices = torch.topk(
scores, self.num_experts_per_tok, dim=-1)
expert_weights = expert_weights.softmax(dim=-1)
flat_expert_indices = expert_indices.view(-1)
x = x.repeat_interleave(self.num_experts_per_tok, dim=0)
| y = torch.empty_like(x) |
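A toy walk-through of the gating arithmetic in `MoETorchFFN.forward` above (illustrative only, not repository code), with 4 experts and 2 experts per token:
```python
import torch

num_experts, k, dim = 4, 2, 8
x = torch.randn(3, dim)                                          # 3 tokens
gate = torch.nn.Linear(dim, num_experts, bias=False)

scores = gate(x)                                                 # (3, 4)
expert_weights, expert_indices = torch.topk(scores, k, dim=-1)   # (3, 2) each
expert_weights = expert_weights.softmax(dim=-1)                  # renormalise over the 2 chosen experts
x_rep = x.repeat_interleave(k, dim=0)                            # (6, 8): one row per (token, expert) pair
print(expert_indices.shape, expert_weights.sum(dim=-1))          # weights sum to 1 per token
```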
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: bytedance/ImageDream
# Path: threestudio/models/prompt_processors/base.py
class PromptProcessorOutput:
text_embeddings: Float[Tensor, "N Nf"]
uncond_text_embeddings: Float[Tensor, "N Nf"]
text_embeddings_vd: Float[Tensor, "Nv N Nf"]
uncond_text_embeddings_vd: Float[Tensor, "Nv N Nf"]
directions: List[DirectionConfig]
direction2idx: Dict[str, int]
use_perp_neg: bool
perp_neg_f_sb: Tuple[float, float, float]
perp_neg_f_fsb: Tuple[float, float, float]
perp_neg_f_fs: Tuple[float, float, float]
perp_neg_f_sf: Tuple[float, float, float]
image: Any
def get_text_embeddings(
self,
elevation: Float[Tensor, "B"],
azimuth: Float[Tensor, "B"],
camera_distances: Float[Tensor, "B"],
view_dependent_prompting: bool = True,
) -> Float[Tensor, "BB N Nf"]:
batch_size = elevation.shape[0]
if view_dependent_prompting:
# Get direction
direction_idx = torch.zeros_like(elevation, dtype=torch.long)
for d in self.directions:
direction_idx[
d.condition(elevation, azimuth, camera_distances)
] = self.direction2idx[d.name]
# Get text embeddings
text_embeddings = self.text_embeddings_vd[direction_idx] # type: ignore
uncond_text_embeddings = self.uncond_text_embeddings_vd[direction_idx] # type: ignore
else:
text_embeddings = self.text_embeddings.expand(batch_size, -1, -1) # type: ignore
uncond_text_embeddings = self.uncond_text_embeddings.expand( # type: ignore
batch_size, -1, -1
)
# IMPORTANT: we return (cond, uncond), which is in different order than other implementations!
return torch.cat([text_embeddings, uncond_text_embeddings], dim=0)
def get_text_embeddings_perp_neg(
self,
elevation: Float[Tensor, "B"],
azimuth: Float[Tensor, "B"],
camera_distances: Float[Tensor, "B"],
view_dependent_prompting: bool = True,
) -> Tuple[Float[Tensor, "BBBB N Nf"], Float[Tensor, "B 2"]]:
assert (
view_dependent_prompting
), "Perp-Neg only works with view-dependent prompting"
batch_size = elevation.shape[0]
direction_idx = torch.zeros_like(elevation, dtype=torch.long)
for d in self.directions:
direction_idx[
d.condition(elevation, azimuth, camera_distances)
] = self.direction2idx[d.name]
# 0 - side view
# 1 - front view
# 2 - back view
# 3 - overhead view
pos_text_embeddings = []
neg_text_embeddings = []
neg_guidance_weights = []
uncond_text_embeddings = []
side_emb = self.text_embeddings_vd[0]
front_emb = self.text_embeddings_vd[1]
back_emb = self.text_embeddings_vd[2]
overhead_emb = self.text_embeddings_vd[3]
for idx, ele, azi, dis in zip(
direction_idx, elevation, azimuth, camera_distances
):
azi = shift_azimuth_deg(azi) # to (-180, 180)
uncond_text_embeddings.append(
self.uncond_text_embeddings_vd[idx]
) # should be ""
if idx.item() == 3: # overhead view
pos_text_embeddings.append(overhead_emb) # side view
# dummy
neg_text_embeddings += [
self.uncond_text_embeddings_vd[idx],
self.uncond_text_embeddings_vd[idx],
]
neg_guidance_weights += [0.0, 0.0]
else: # interpolating views
if torch.abs(azi) < 90:
# front-side interpolation
# 0 - complete side, 1 - complete front
r_inter = 1 - torch.abs(azi) / 90
pos_text_embeddings.append(
r_inter * front_emb + (1 - r_inter) * side_emb
)
neg_text_embeddings += [front_emb, side_emb]
neg_guidance_weights += [
-shifted_expotional_decay(*self.perp_neg_f_fs, r_inter),
-shifted_expotional_decay(*self.perp_neg_f_sf, 1 - r_inter),
]
else:
# side-back interpolation
# 0 - complete back, 1 - complete side
r_inter = 2.0 - torch.abs(azi) / 90
pos_text_embeddings.append(
r_inter * side_emb + (1 - r_inter) * back_emb
)
neg_text_embeddings += [side_emb, front_emb]
neg_guidance_weights += [
-shifted_expotional_decay(*self.perp_neg_f_sb, r_inter),
-shifted_expotional_decay(*self.perp_neg_f_fsb, r_inter),
]
text_embeddings = torch.cat(
[
torch.stack(pos_text_embeddings, dim=0),
torch.stack(uncond_text_embeddings, dim=0),
torch.stack(neg_text_embeddings, dim=0),
],
dim=0,
)
return text_embeddings, torch.as_tensor(
neg_guidance_weights, device=elevation.device
).reshape(batch_size, 2)
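The view-interpolation weights used above, restated for two sample azimuths (illustrative only, not repository code):
```python
def front_side_weight(azi_deg: float) -> float:
    return 1 - abs(azi_deg) / 90    # 1.0 = pure front, 0.0 = pure side (|azi| < 90)

def side_back_weight(azi_deg: float) -> float:
    return 2.0 - abs(azi_deg) / 90  # 1.0 = pure side, 0.0 = pure back (|azi| >= 90)

print(front_side_weight(30.0))   # ~0.667: mostly front prompt, some side prompt
print(side_back_weight(150.0))   # ~0.333: mostly back prompt, some side prompt
```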
# Path: threestudio/utils/base.py
class BaseObject(Updateable):
@dataclass
class Config:
pass
cfg: Config # add this to every subclass of BaseObject to enable static type checking
def __init__(
self, cfg: Optional[Union[dict, DictConfig]] = None, *args, **kwargs
) -> None:
super().__init__()
self.cfg = parse_structured(self.Config, cfg)
self.device = get_device()
self.configure(*args, **kwargs)
def configure(self, *args, **kwargs) -> None:
pass
# Path: threestudio/utils/misc.py
def C(value: Any, epoch: int, global_step: int) -> float:
if isinstance(value, int) or isinstance(value, float):
pass
else:
value = config_to_primitive(value)
if not isinstance(value, list):
raise TypeError("Scalar specification only supports list, got", type(value))
if len(value) == 3:
value = [0] + value
assert len(value) == 4
start_step, start_value, end_value, end_step = value
if isinstance(end_step, int):
current_step = global_step
value = start_value + (end_value - start_value) * max(
min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0
)
elif isinstance(end_step, float):
current_step = epoch
value = start_value + (end_value - start_value) * max(
min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0
)
return value
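The interpolation performed by `C()` above, written out for a 4-element schedule `[start_step, start_value, end_value, end_step]` with an integer `end_step` (so it is driven by `global_step`); a 3-element list is first padded with `start_step = 0`. Illustrative values only:
```python
start_step, start_value, end_value, end_step = 0, 0.0, 1.0, 1000
global_step = 250

value = start_value + (end_value - start_value) * max(
    min(1.0, (global_step - start_step) / (end_step - start_step)), 0.0
)
print(value)  # 0.25, and clamped at 1.0 once global_step >= 1000
```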
# Path: threestudio/utils/misc.py
def parse_version(ver: str):
return version.parse(ver)
# Path: threestudio/models/guidance/controlnet_guidance.py
import os
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass
from controlnet_aux import CannyDetector, NormalBaeDetector
from diffusers import ControlNetModel, DDIMScheduler, StableDiffusionControlNetPipeline
from diffusers.utils.import_utils import is_xformers_available
from tqdm import tqdm
from threestudio.models.prompt_processors.base import PromptProcessorOutput
from threestudio.utils.base import BaseObject
from threestudio.utils.misc import C, parse_version
from threestudio.utils.typing import *
from threestudio.utils.config import ExperimentConfig, load_config
from threestudio.utils.typing import Optional
@torch.cuda.amp.autocast(enabled=False)
def forward_control_unet(
self,
latents: Float[Tensor, "..."],
t: Float[Tensor, "..."],
encoder_hidden_states: Float[Tensor, "..."],
cross_attention_kwargs,
down_block_additional_residuals,
mid_block_additional_residual,
) -> Float[Tensor, "..."]:
input_dtype = latents.dtype
return self.unet(
latents.to(self.weights_dtype),
t.to(self.weights_dtype),
encoder_hidden_states=encoder_hidden_states.to(self.weights_dtype),
cross_attention_kwargs=cross_attention_kwargs,
down_block_additional_residuals=down_block_additional_residuals,
mid_block_additional_residual=mid_block_additional_residual,
).sample.to(input_dtype)
@torch.cuda.amp.autocast(enabled=False)
def encode_images(
self, imgs: Float[Tensor, "B 3 512 512"]
) -> Float[Tensor, "B 4 64 64"]:
input_dtype = imgs.dtype
imgs = imgs * 2.0 - 1.0
posterior = self.vae.encode(imgs.to(self.weights_dtype)).latent_dist
latents = posterior.sample() * self.vae.config.scaling_factor
return latents.to(input_dtype)
@torch.cuda.amp.autocast(enabled=False)
def encode_cond_images(
self, imgs: Float[Tensor, "B 3 512 512"]
) -> Float[Tensor, "B 4 64 64"]:
input_dtype = imgs.dtype
imgs = imgs * 2.0 - 1.0
posterior = self.vae.encode(imgs.to(self.weights_dtype)).latent_dist
latents = posterior.mode()
uncond_image_latents = torch.zeros_like(latents)
latents = torch.cat([latents, latents, uncond_image_latents], dim=0)
return latents.to(input_dtype)
@torch.cuda.amp.autocast(enabled=False)
def decode_latents(
self,
latents: Float[Tensor, "B 4 H W"],
latent_height: int = 64,
latent_width: int = 64,
) -> Float[Tensor, "B 3 512 512"]:
input_dtype = latents.dtype
latents = F.interpolate(
latents, (latent_height, latent_width), mode="bilinear", align_corners=False
)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents.to(self.weights_dtype)).sample
image = (image * 0.5 + 0.5).clamp(0, 1)
return image.to(input_dtype)
def edit_latents(
self,
text_embeddings: Float[Tensor, "BB 77 768"],
latents: Float[Tensor, "B 4 64 64"],
image_cond: Float[Tensor, "B 3 512 512"],
t: Int[Tensor, "B"],
) -> Float[Tensor, "B 4 64 64"]:
self.scheduler.config.num_train_timesteps = t.item()
self.scheduler.set_timesteps(self.cfg.diffusion_steps)
with torch.no_grad():
# add noise
noise = torch.randn_like(latents)
latents = self.scheduler.add_noise(latents, noise, t) # type: ignore
# sections of code used from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py
threestudio.debug("Start editing...")
for i, t in enumerate(self.scheduler.timesteps):
# predict the noise residual with unet, NO grad!
with torch.no_grad():
# pred noise
latent_model_input = torch.cat([latents] * 2)
(
down_block_res_samples,
mid_block_res_sample,
) = self.forward_controlnet(
latent_model_input,
t,
encoder_hidden_states=text_embeddings,
image_cond=image_cond,
condition_scale=self.cfg.condition_scale,
)
noise_pred = self.forward_control_unet(
latent_model_input,
t,
encoder_hidden_states=text_embeddings,
cross_attention_kwargs=None,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
)
# perform classifier-free guidance
noise_pred_text, noise_pred_uncond = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
noise_pred_text - noise_pred_uncond
)
# get previous sample, continue loop
latents = self.scheduler.step(noise_pred, t, latents).prev_sample
threestudio.debug("Editing finished.")
return latents
def prepare_image_cond(self, cond_rgb: Float[Tensor, "B H W C"]):
if self.cfg.control_type == "normal":
cond_rgb = (
(cond_rgb[0].detach().cpu().numpy() * 255).astype(np.uint8).copy()
)
detected_map = self.preprocessor(cond_rgb)
control = (
torch.from_numpy(np.array(detected_map)).float().to(self.device) / 255.0
)
control = control.unsqueeze(0)
control = control.permute(0, 3, 1, 2)
elif self.cfg.control_type == "canny":
| cond_rgb = ( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: TencentARC/MotionCtrl
# Path: gradio_utils/camera_utils.py
CAMERA_MOTION_MODE = ["Basic Camera Poses", "Provided Complex Camera Poses", "Custom Camera Poses"]
# Path: gradio_utils/camera_utils.py
def process_camera(camera_dict):
# "First A then B", "Both A and B", "Custom"
if camera_dict['complex'] is not None:
with open(COMPLEX_CAMERA[camera_dict['complex']]) as f:
RT = json.load(f) # [16, 12]
RT = np.array(RT).reshape(-1, 3, 4)
print(RT.shape)
return RT
motion_list = camera_dict['motion']
mode = camera_dict['mode']
speed = camera_dict['speed']
print(len(motion_list))
if len(motion_list) == 0:
angle = np.array([0,0,0])
T = np.array([0,0,0])
RT = get_camera_motion(angle, T, speed, 16)
elif len(motion_list) == 1:
angle = np.array(CAMERA[motion_list[0]]["angle"])
T = np.array(CAMERA[motion_list[0]]["T"])
print(angle, T)
RT = get_camera_motion(angle, T, speed, 16)
elif len(motion_list) == 2:
if mode == "Customized Mode 1: First A then B":
angle = np.array(CAMERA[motion_list[0]]["angle"])
T = np.array(CAMERA[motion_list[0]]["T"])
RT_0 = get_camera_motion(angle, T, speed, 8)
angle = np.array(CAMERA[motion_list[1]]["angle"])
T = np.array(CAMERA[motion_list[1]]["T"])
RT_1 = get_camera_motion(angle, T, speed, 8)
RT = combine_camera_motion(RT_0, RT_1)
elif mode == "Customized Mode 2: Both A and B":
angle = np.array(CAMERA[motion_list[0]]["angle"]) + np.array(CAMERA[motion_list[1]]["angle"])
T = np.array(CAMERA[motion_list[0]]["T"]) + np.array(CAMERA[motion_list[1]]["T"])
RT = get_camera_motion(angle, T, speed, 16)
# return RT.reshape(-1, 12)
return RT
# Path: gradio_utils/traj_utils.py
OBJECT_MOTION_MODE = ["Provided Trajectory", "Custom Trajectory"]
# Path: gradio_utils/traj_utils.py
def get_provided_traj(traj_name):
traj = read_points(PROVIDED_TRAJS[traj_name])
# xrange from 256 to 1024
traj = [[int(1024*x/256), int(1024*y/256)] for x,y in traj]
return traj
# Path: gradio_utils/traj_utils.py
def process_points(points):
frames = 16
defualt_points = [[512,512]]*16
if len(points) < 2:
return defualt_points
elif len(points) >= frames:
skip = len(points)//frames
return points[::skip][:15] + points[-1:]
else:
insert_num = frames - len(points)
insert_num_dict = {}
interval = len(points) - 1
n = insert_num // interval
m = insert_num % interval
for i in range(interval):
insert_num_dict[i] = n
for i in range(m):
insert_num_dict[i] += 1
res = []
for i in range(interval):
insert_points = []
x0,y0 = points[i]
x1,y1 = points[i+1]
delta_x = x1 - x0
delta_y = y1 - y0
for j in range(insert_num_dict[i]):
x = x0 + (j+1)/(insert_num_dict[i]+1)*delta_x
y = y0 + (j+1)/(insert_num_dict[i]+1)*delta_y
insert_points.append([int(x), int(y)])
res += points[i:i+1] + insert_points
res += points[-1:]
return res
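A quick worked example of the resampling above (illustrative only): with 4 clicked points and 16 target frames, 12 points must be inserted across 3 segments, i.e. 4 interpolated points per segment with no remainder.
```python
points = [[0, 0], [30, 0], [60, 0], [90, 0]]
frames = 16
interval = len(points) - 1         # 3 segments
insert_num = frames - len(points)  # 12 points to insert
print(insert_num // interval, insert_num % interval)  # 4 per segment, 0 left over
```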
# Path: gradio_utils/traj_utils.py
def process_traj(points, device='cpu'):
xy_range = 1024
points = process_points(points)
points = [[int(256*x/xy_range), int(256*y/xy_range)] for x,y in points]
optical_flow = get_flow(points)
# optical_flow = torch.tensor(optical_flow).to(device)
return optical_flow
# Path: gradio_utils/utils.py
def vis_camera(RT_list, rescale_T=1):
fig = go.Figure()
showticklabels = True
visible = True
scene_bounds = 2
base_radius = 2.5
zoom_scale = 1.5
fov_deg = 50.0
edges = [(0, 1), (0, 2), (0, 3), (1, 2), (2, 3), (3, 1), (3, 4)]
colors = px.colors.qualitative.Plotly
cone_list = []
n = len(RT_list)
for i, RT in enumerate(RT_list):
R = RT[:,:3]
T = RT[:,-1]/rescale_T
cone = calc_cam_cone_pts_3d(R, T, fov_deg)
cone_list.append((cone, (i*1/n, "green"), f"view_{i}"))
for (cone, clr, legend) in cone_list:
for (i, edge) in enumerate(edges):
(x1, x2) = (cone[edge[0], 0], cone[edge[1], 0])
(y1, y2) = (cone[edge[0], 1], cone[edge[1], 1])
(z1, z2) = (cone[edge[0], 2], cone[edge[1], 2])
fig.add_trace(go.Scatter3d(
x=[x1, x2], y=[y1, y2], z=[z1, z2], mode='lines',
line=dict(color=clr, width=3),
name=legend, showlegend=(i == 0)))
fig.update_layout(
height=500,
autosize=True,
# hovermode=False,
margin=go.layout.Margin(l=0, r=0, b=0, t=0),
showlegend=True,
legend=dict(
yanchor='bottom',
y=0.01,
xanchor='right',
x=0.99,
),
scene=dict(
aspectmode='manual',
aspectratio=dict(x=1, y=1, z=1.0),
camera=dict(
center=dict(x=0.0, y=0.0, z=0.0),
up=dict(x=0.0, y=-1.0, z=0.0),
eye=dict(x=scene_bounds/2, y=-scene_bounds/2, z=-scene_bounds/2),
),
xaxis=dict(
range=[-scene_bounds, scene_bounds],
showticklabels=showticklabels,
visible=visible,
),
yaxis=dict(
range=[-scene_bounds, scene_bounds],
showticklabels=showticklabels,
visible=visible,
),
zaxis=dict(
range=[-scene_bounds, scene_bounds],
showticklabels=showticklabels,
visible=visible,
)
))
return fig
# Path: lvdm/models/samplers/ddim.py
class DDIMSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
self.counter = 0
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
alphas_cumprod = self.model.alphas_cumprod
assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
self.register_buffer('betas', to_torch(self.model.betas))
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
# ddim sampling parameters
ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
ddim_timesteps=self.ddim_timesteps,
eta=ddim_eta,verbose=verbose)
self.register_buffer('ddim_sigmas', ddim_sigmas)
self.register_buffer('ddim_alphas', ddim_alphas)
self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
(1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
1 - self.alphas_cumprod / self.alphas_cumprod_prev))
self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
@torch.no_grad()
def sample(self,
S,
batch_size,
shape,
conditioning=None,
callback=None,
normals_sequence=None,
img_callback=None,
quantize_x0=False,
eta=0.,
mask=None,
x0=None,
temperature=1.,
noise_dropout=0.,
score_corrector=None,
corrector_kwargs=None,
verbose=True,
schedule_verbose=False,
x_T=None,
log_every_t=100,
unconditional_guidance_scale=1.,
unconditional_conditioning=None,
# this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
**kwargs
):
# check condition bs
if conditioning is not None:
if isinstance(conditioning, dict):
try:
cbs = conditioning[list(conditioning.keys())[0]].shape[0]
except:
cbs = conditioning[list(conditioning.keys())[0]][0].shape[0]
if cbs != batch_size:
print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
else:
if conditioning.shape[0] != batch_size:
print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=schedule_verbose)
# make shape
if len(shape) == 3:
C, H, W = shape
size = (batch_size, C, H, W)
elif len(shape) == 4:
C, T, H, W = shape
size = (batch_size, C, T, H, W)
# print(f'Data shape for DDIM sampling is {size}, eta {eta}')
samples, intermediates = self.ddim_sampling(conditioning, size,
callback=callback,
img_callback=img_callback,
quantize_denoised=quantize_x0,
mask=mask, x0=x0,
ddim_use_original_steps=False,
noise_dropout=noise_dropout,
temperature=temperature,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
x_T=x_T,
log_every_t=log_every_t,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
verbose=verbose,
**kwargs)
return samples, intermediates
@torch.no_grad()
def ddim_sampling(self, cond, shape,
x_T=None, ddim_use_original_steps=False,
callback=None, timesteps=None, quantize_denoised=False,
mask=None, x0=None, img_callback=None, log_every_t=100,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
unconditional_guidance_scale=1., unconditional_conditioning=None, verbose=True,
**kwargs):
device = self.model.betas.device
b = shape[0]
if x_T is None:
img = torch.randn(shape, device=device)
else:
img = x_T
if timesteps is None:
timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
elif timesteps is not None and not ddim_use_original_steps:
subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
timesteps = self.ddim_timesteps[:subset_end]
intermediates = {'x_inter': [img], 'pred_x0': [img]}
time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)
total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
if verbose:
iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
else:
iterator = time_range
clean_cond = kwargs.pop("clean_cond", False)
for i, step in enumerate(iterator):
index = total_steps - i - 1
ts = torch.full((b,), step, device=device, dtype=torch.long)
# use mask to blend noised original latent (img_orig) & new sampled latent (img)
if mask is not None:
assert x0 is not None
if clean_cond:
img_orig = x0
else:
img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? <ddim inversion>
img = img_orig * mask + (1. - mask) * img # keep original & modify use img
outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
quantize_denoised=quantize_denoised, temperature=temperature,
noise_dropout=noise_dropout, score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
**kwargs)
img, pred_x0 = outs
if callback: callback(i)
if img_callback: img_callback(pred_x0, i)
if index % log_every_t == 0 or index == total_steps - 1:
intermediates['x_inter'].append(img)
intermediates['pred_x0'].append(pred_x0)
return img, intermediates
@torch.no_grad()
def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
unconditional_guidance_scale=1., unconditional_conditioning=None,
uc_type=None, conditional_guidance_scale_temporal=None, **kwargs):
b, *_, device = *x.shape, x.device
if x.dim() == 5:
is_video = True
else:
is_video = False
# f=open('/apdcephfs_cq2/share_1290939/yingqinghe/code/LVDM-private/cfg_range_s5noclamp.txt','a')
# print(f't={t}, model input, min={torch.min(x)}, max={torch.max(x)}',file=f)
if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
e_t = self.model.apply_model(x, t, c, **kwargs) # unet denoiser
else:
# with unconditional condition
if isinstance(c, torch.Tensor):
un_kwargs = kwargs.copy()
if isinstance(unconditional_conditioning, dict):
for uk, uv in unconditional_conditioning.items():
if uk in un_kwargs:
un_kwargs[uk] = uv
unconditional_conditioning = unconditional_conditioning['uc']
if 'cond_T' in kwargs and t < kwargs['cond_T']:
if 'features_adapter' in kwargs:
kwargs.pop('features_adapter')
un_kwargs.pop('features_adapter')
# kwargs['features_adapter'] = None
# un_kwargs['features_adapter'] = None
# if 'pose_emb' in kwargs:
# kwargs.pop('pose_emb')
# un_kwargs.pop('pose_emb')
# kwargs['pose_emb'] = None
# un_kwargs['pose_emb'] = None
e_t = self.model.apply_model(x, t, c, **kwargs)
# e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)
e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **un_kwargs)
elif isinstance(c, dict):
e_t = self.model.apply_model(x, t, c, **kwargs)
e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)
else:
raise NotImplementedError
# text cfg
if uc_type is None:
e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
else:
if uc_type == 'cfg_original':
e_t = e_t + unconditional_guidance_scale * (e_t - e_t_uncond)
elif uc_type == 'cfg_ours':
e_t = e_t + unconditional_guidance_scale * (e_t_uncond - e_t)
else:
raise NotImplementedError
# temporal guidance
if conditional_guidance_scale_temporal is not None:
e_t_temporal = self.model.apply_model(x, t, c, **kwargs)
e_t_image = self.model.apply_model(x, t, c, no_temporal_attn=True, **kwargs)
e_t = e_t + conditional_guidance_scale_temporal * (e_t_temporal - e_t_image)
if score_corrector is not None:
assert self.model.parameterization == "eps"
e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
# select parameters corresponding to the currently considered timestep
if is_video:
size = (b, 1, 1, 1, 1)
else:
size = (b, 1, 1, 1)
a_t = torch.full(size, alphas[index], device=device)
a_prev = torch.full(size, alphas_prev[index], device=device)
sigma_t = torch.full(size, sigmas[index], device=device)
sqrt_one_minus_at = torch.full(size, sqrt_one_minus_alphas[index],device=device)
# current prediction for x_0
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
# print(f't={t}, pred_x0, min={torch.min(pred_x0)}, max={torch.max(pred_x0)}',file=f)
if quantize_denoised:
pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
# direction pointing to x_t
dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
# # norm pred_x0
# p=2
# s=()
# pred_x0 = pred_x0 - torch.max(torch.abs(pred_x0))
noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
if noise_dropout > 0.:
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
return x_prev, pred_x0
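The single-step DDIM update applied in `p_sample_ddim` above, restated with scalar alphas (illustrative only; `sigma_t = 0` gives the deterministic DDIM case):
```python
import torch

a_t, a_prev, sigma_t = 0.9, 0.95, 0.0
x_t = torch.randn(4)  # current latent
e_t = torch.randn(4)  # predicted noise (after classifier-free guidance)

pred_x0 = (x_t - (1 - a_t) ** 0.5 * e_t) / a_t ** 0.5
dir_xt = (1 - a_prev - sigma_t ** 2) ** 0.5 * e_t
x_prev = a_prev ** 0.5 * pred_x0 + dir_xt  # plus sigma_t * noise when eta > 0
print(x_prev.shape)
```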
# Path: main/evaluation/motionctrl_inference.py
DEFAULT_NEGATIVE_PROMPT = 'blur, haze, deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, '\
'sketch, cartoon, drawing, anime, mutated hands and fingers, deformed, distorted, '\
'disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, '\
'floating limbs, disconnected limbs, mutation, mutated, ugly, disgusting, amputation'
RT = camera_poses[..., None]
RT = None
def load_model_checkpoint(model, ckpt, adapter_ckpt=None):
def load_trajs(cond_dir, trajs):
def load_camera_pose(cond_dir, camera_poses):
def save_results(samples, filename, savedir, fps=10):
def motionctrl_sample(
model,
prompts,
noise_shape,
camera_poses=None,
trajs=None,
n_samples=1,
unconditional_guidance_scale=1.0,
unconditional_guidance_scale_temporal=None,
ddim_steps=50,
ddim_eta=1.,
**kwargs):
def run_inference(args, gpu_num, gpu_no):
def save_images(samples, savedir):
def get_parser():
# Path: utils/utils.py
def instantiate_from_config(config):
if not "target" in config:
if config == '__is_first_stage__':
return None
elif config == "__is_unconditional__":
return None
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"])(**config.get("params", dict()))
# Path: app.py
import argparse
import os
import tempfile
import cv2
import gradio as gr
import imageio
import numpy as np
import torch
import torchvision
from functools import partial
from omegaconf import OmegaConf
from PIL import Image
from pytorch_lightning import seed_everything
from gradio_utils.camera_utils import CAMERA_MOTION_MODE, process_camera
from gradio_utils.traj_utils import (OBJECT_MOTION_MODE, get_provided_traj,
process_points, process_traj)
from gradio_utils.utils import vis_camera
from lvdm.models.samplers.ddim import DDIMSampler
from main.evaluation.motionctrl_inference import (DEFAULT_NEGATIVE_PROMPT,
load_model_checkpoint,
post_prompt)
from utils.utils import instantiate_from_config
os.environ['KMP_DUPLICATE_LIB_OK']='True'
SPACE_ID = os.environ.get('SPACE_ID', '')
#### Description ####
title = r"""<h1 align="center">MotionCtrl: A Unified and Flexible Motion Controller for Video Generation</h1>"""
description = r"""
<b>Official Gradio demo</b> for <a href='https://github.com/TencentARC/MotionCtrl' target='_blank'><b>MotionCtrl: A Unified and Flexible Motion Controller for Video Generation</b></a>.<br>
🔥 MotionCtrl is capable of independently and flexibly controlling the camera motion and object motion of a generated video, with only a unified model.<br>
🤗 Try to control the motion of the generated videos yourself!<br>
❗❗❗ Please note that the current version of **MotionCtrl** is deployed on **LVDM/VideoCrafter**. The versions deployed on **AnimateDiff** and **SVD** will be released soon.<br>
"""
article = r"""
If MotionCtrl is helpful, please help to ⭐ the <a href='https://github.com/TencentARC/MotionCtrl' target='_blank'>Github Repo</a>. Thanks!
[![GitHub Stars](https://img.shields.io/github/stars/TencentARC%2FMotionCtrl
)](https://github.com/TencentARC/MotionCtrl)
---
📝 **Citation**
<br>
If our work is useful for your research, please consider citing:
```bibtex
@inproceedings{wang2023motionctrl,
title={MotionCtrl: A Unified and Flexible Motion Controller for Video Generation},
  author={Wang, Zhouxia and Yuan, Ziyang and Wang, Xintao and Chen, Tianshui and Xia, Menghan and Luo, Ping and Shan, Ying},
booktitle={arXiv preprint arXiv:2312.03641},
| year={2023} |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: TianxingWu/FreeInit
# Path: examples/AnimateDiff/animatediff/models/attention.py
class Transformer3DModel(ModelMixin, ConfigMixin):
@register_to_config
def __init__(
self,
num_attention_heads: int = 16,
attention_head_dim: int = 88,
in_channels: Optional[int] = None,
num_layers: int = 1,
dropout: float = 0.0,
norm_num_groups: int = 32,
cross_attention_dim: Optional[int] = None,
attention_bias: bool = False,
activation_fn: str = "geglu",
num_embeds_ada_norm: Optional[int] = None,
use_linear_projection: bool = False,
only_cross_attention: bool = False,
upcast_attention: bool = False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
):
super().__init__()
self.use_linear_projection = use_linear_projection
self.num_attention_heads = num_attention_heads
self.attention_head_dim = attention_head_dim
inner_dim = num_attention_heads * attention_head_dim
# Define input layers
self.in_channels = in_channels
self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
if use_linear_projection:
self.proj_in = nn.Linear(in_channels, inner_dim)
else:
self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
# Define transformers blocks
self.transformer_blocks = nn.ModuleList(
[
BasicTransformerBlock(
inner_dim,
num_attention_heads,
attention_head_dim,
dropout=dropout,
cross_attention_dim=cross_attention_dim,
activation_fn=activation_fn,
num_embeds_ada_norm=num_embeds_ada_norm,
attention_bias=attention_bias,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
for d in range(num_layers)
]
)
# 4. Define output layers
if use_linear_projection:
self.proj_out = nn.Linear(in_channels, inner_dim)
else:
self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, return_dict: bool = True):
# Input
assert hidden_states.dim() == 5, f"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}."
video_length = hidden_states.shape[2]
hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w")
encoder_hidden_states = repeat(encoder_hidden_states, 'b n c -> (b f) n c', f=video_length)
batch, channel, height, weight = hidden_states.shape
residual = hidden_states
hidden_states = self.norm(hidden_states)
if not self.use_linear_projection:
hidden_states = self.proj_in(hidden_states)
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)
else:
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)
hidden_states = self.proj_in(hidden_states)
# Blocks
for block in self.transformer_blocks:
hidden_states = block(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
timestep=timestep,
video_length=video_length
)
# Output
if not self.use_linear_projection:
hidden_states = (
hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()
)
hidden_states = self.proj_out(hidden_states)
else:
hidden_states = self.proj_out(hidden_states)
hidden_states = (
hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()
)
output = hidden_states + residual
output = rearrange(output, "(b f) c h w -> b c f h w", f=video_length)
if not return_dict:
return (output,)
return Transformer3DModelOutput(sample=output)
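# Illustrative sketch (not part of the repo excerpt above): the forward pass folds the
# frame axis into the batch axis so plain 2D attention blocks can be reused for video,
# then unfolds it afterwards. Shapes below are made up.
from einops import rearrange, repeat
import torch
x = torch.randn(2, 320, 16, 32, 32)                  # b c f h w
f = x.shape[2]
x2d = rearrange(x, "b c f h w -> (b f) c h w")       # fold frames into the batch
text = repeat(torch.randn(2, 77, 768), "b n c -> (b f) n c", f=f)
# ... spatial transformer blocks would operate on x2d / text here ...
x = rearrange(x2d, "(b f) c h w -> b c f h w", f=f)  # restore the video layout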
# Path: examples/AnimateDiff/animatediff/models/resnet.py
class Downsample3D(nn.Module):
def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.padding = padding
stride = 2
self.name = name
if use_conv:
self.conv = InflatedConv3d(self.channels, self.out_channels, 3, stride=stride, padding=padding)
else:
raise NotImplementedError
def forward(self, hidden_states):
assert hidden_states.shape[1] == self.channels
if self.use_conv and self.padding == 0:
raise NotImplementedError
assert hidden_states.shape[1] == self.channels
hidden_states = self.conv(hidden_states)
return hidden_states
# Path: examples/AnimateDiff/animatediff/models/resnet.py
class ResnetBlock3D(nn.Module):
def __init__(
self,
*,
in_channels,
out_channels=None,
conv_shortcut=False,
dropout=0.0,
temb_channels=512,
groups=32,
groups_out=None,
pre_norm=True,
eps=1e-6,
non_linearity="swish",
time_embedding_norm="default",
output_scale_factor=1.0,
use_in_shortcut=None,
use_inflated_groupnorm=None,
):
super().__init__()
self.pre_norm = pre_norm
self.pre_norm = True
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.use_conv_shortcut = conv_shortcut
self.time_embedding_norm = time_embedding_norm
self.output_scale_factor = output_scale_factor
if groups_out is None:
groups_out = groups
assert use_inflated_groupnorm != None
if use_inflated_groupnorm:
self.norm1 = InflatedGroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
else:
self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
self.conv1 = InflatedConv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
if temb_channels is not None:
if self.time_embedding_norm == "default":
time_emb_proj_out_channels = out_channels
elif self.time_embedding_norm == "scale_shift":
time_emb_proj_out_channels = out_channels * 2
else:
raise ValueError(f"unknown time_embedding_norm : {self.time_embedding_norm} ")
self.time_emb_proj = torch.nn.Linear(temb_channels, time_emb_proj_out_channels)
else:
self.time_emb_proj = None
if use_inflated_groupnorm:
self.norm2 = InflatedGroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)
else:
self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)
self.dropout = torch.nn.Dropout(dropout)
self.conv2 = InflatedConv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
if non_linearity == "swish":
self.nonlinearity = lambda x: F.silu(x)
elif non_linearity == "mish":
self.nonlinearity = Mish()
elif non_linearity == "silu":
self.nonlinearity = nn.SiLU()
self.use_in_shortcut = self.in_channels != self.out_channels if use_in_shortcut is None else use_in_shortcut
self.conv_shortcut = None
if self.use_in_shortcut:
self.conv_shortcut = InflatedConv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, input_tensor, temb):
hidden_states = input_tensor
hidden_states = self.norm1(hidden_states)
hidden_states = self.nonlinearity(hidden_states)
hidden_states = self.conv1(hidden_states)
if temb is not None:
temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None, None]
if temb is not None and self.time_embedding_norm == "default":
hidden_states = hidden_states + temb
hidden_states = self.norm2(hidden_states)
if temb is not None and self.time_embedding_norm == "scale_shift":
scale, shift = torch.chunk(temb, 2, dim=1)
hidden_states = hidden_states * (1 + scale) + shift
hidden_states = self.nonlinearity(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.conv2(hidden_states)
if self.conv_shortcut is not None:
input_tensor = self.conv_shortcut(input_tensor)
output_tensor = (input_tensor + hidden_states) / self.output_scale_factor
return output_tensor
# Path: examples/AnimateDiff/animatediff/models/resnet.py
class Upsample3D(nn.Module):
def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_conv_transpose = use_conv_transpose
self.name = name
conv = None
if use_conv_transpose:
raise NotImplementedError
elif use_conv:
self.conv = InflatedConv3d(self.channels, self.out_channels, 3, padding=1)
def forward(self, hidden_states, output_size=None):
assert hidden_states.shape[1] == self.channels
if self.use_conv_transpose:
raise NotImplementedError
# Cast to float32 to as 'upsample_nearest2d_out_frame' op does not support bfloat16
dtype = hidden_states.dtype
if dtype == torch.bfloat16:
hidden_states = hidden_states.to(torch.float32)
# upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
if hidden_states.shape[0] >= 64:
hidden_states = hidden_states.contiguous()
# if `output_size` is passed we force the interpolation output
# size and do not make use of `scale_factor=2`
if output_size is None:
hidden_states = F.interpolate(hidden_states, scale_factor=[1.0, 2.0, 2.0], mode="nearest")
else:
hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest")
# If the input is bfloat16, we cast back to bfloat16
if dtype == torch.bfloat16:
hidden_states = hidden_states.to(dtype)
# if self.use_conv:
# if self.name == "conv":
# hidden_states = self.conv(hidden_states)
# else:
# hidden_states = self.Conv2d_0(hidden_states)
hidden_states = self.conv(hidden_states)
return hidden_states
# Path: examples/AnimateDiff/animatediff/models/motion_module.py
def get_motion_module(
in_channels,
motion_module_type: str,
motion_module_kwargs: dict
):
if motion_module_type == "Vanilla":
return VanillaTemporalModule(in_channels=in_channels, **motion_module_kwargs,)
else:
raise ValueError
# Path: examples/AnimateDiff/animatediff/models/unet_blocks.py
import torch
import pdb
from torch import nn
from .attention import Transformer3DModel
from .resnet import Downsample3D, ResnetBlock3D, Upsample3D
from .motion_module import get_motion_module
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
# there is always at least one resnet
resnets = [
ResnetBlock3D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
use_inflated_groupnorm=use_inflated_groupnorm,
)
]
attentions = []
motion_modules = []
for _ in range(num_layers):
if dual_cross_attention:
raise NotImplementedError
attentions.append(
Transformer3DModel(
attn_num_head_channels,
in_channels // attn_num_head_channels,
in_channels=in_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
)
motion_modules.append(
get_motion_module(
in_channels=in_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
resnets.append(
ResnetBlock3D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
use_inflated_groupnorm=use_inflated_groupnorm,
)
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):
hidden_states = self.resnets[0](hidden_states, temb)
for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):
hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
hidden_states = resnet(hidden_states, temb)
return hidden_states
class CrossAttnDownBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
cross_attention_dim=1280,
output_scale_factor=1.0,
downsample_padding=1,
add_downsample=True,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
| super().__init__() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: allenai/unified-io-2
# Path: t5x/examples/unified_io/data/data_utils.py
DEFAULT_EXTRA_IDS = VOCAB_START + NUM_DETECTION_BIN # 200 for denoising + 1000 extra ids
MODALITY_EXTRA_ID_N_FRAMES = 8 # 8 frames just in case
MODALITY_EXTRA_IDS = (1 + MODALITY_EXTRA_ID_N_FRAMES) * 2 # image/audio input + n * image/audio history
MODALITY_EXTRA_IDS = 0
H = tf.shape(image)[1]
W = tf.shape(image)[2]
H = tf.shape(image)[0]
W = tf.shape(image)[1]
def get_default_vocabulary():
def tokenize(data: Dict, copy_pretokenized=False):
def validate_keyword_prompts(prompts):
def apply_keyword_prompt(prompt, allow_missing=False, **kwargs):
def valid_regex_replace(prompt, replacements, allow_missing=False):
def trim_or_pad_tf(x, seq_len, pad_constant=0):
def trim_or_pad_tf_2d(x, batch, seq_len):
def normalize_image(image,
offset=(0.48145466, 0.4578275, 0.40821073),
scale=(0.26862954, 0.26130258, 0.27577711)):
def unnormalize_image(image,
offset=(0.48145466, 0.4578275, 0.40821073),
scale=(0.26862954, 0.26130258, 0.27577711)):
def denormalize_boxes(boxes, image_shape):
def clip_boxes(boxes, image_shape):
def get_non_empty_box_indices(boxes):
def resize_and_crop_boxes(boxes, image_scale, output_size, offset, paddings):
def resize_and_pad_default(
image, is_training, is_input=True, masks=None, boxes=None, box_labels=None,
random_scale_min=None, random_scale_max=None, random_scale_ratio=None,
resize_method=None, is_history=False
):
def resize_and_pad(image, desired_output_size, masks=None, boxes=None, box_labels=None,
random_scale_min=0.1, random_scale_max=2.0, do_random_scale=False,
shrink_both_sides=True, filter_box=True,
desired_target_size=None, random_scale_ratio=0.0,
resize_method=tf.image.ResizeMethod.BILINEAR,
pad_value=0):
def tokens_to_values(tokens):
def values_to_tokens(vals, clss=None):
def convert_bboxes_to_str(
boxes,
labels=None,
image_size=config.IMAGE_INPUT_SIZE[0],
convert_to_str=True,
shuffle=True,
seperator=" "
):
def apply_with_random_selector(x, func, num_cases):
def _stateless_shuffle(x: tf.Tensor, seed):
def sample_patches(mask, n_patches, stateless=False, seeds=None):
def pad_to_bounding_box(image, offset_height, offset_width, target_height,
target_width, value=0):
def pad_to_bounding_box_internal(image, offset_height, offset_width,
target_height, target_width, check_dims, value):
def random_element(vec, seed=None):
def load_class_names(path, cache={}) -> List[str]:
def _load_class_name(path) -> List[str]:
def box_mask(box, image_size, inclusive=False):
# Path: t5x/examples/unified_io/data/prompt_definition.py
class Prompt:
"""Configurable interface for getting prompts"""
def __init__(self, original_flag=True, revised_original_flag=False, manual_flag=True,
gpt3_flag=True, single_prompt=False, dbg=None):
self.prompt_list = []
self.original_flag = original_flag
self.revised_original_flag = revised_original_flag
self.manual_flag = manual_flag
self.gpt3_flag = gpt3_flag
self.single_prompt = single_prompt
self.dbg = dbg
def get_prompt_list(self, task_name, dataset_name):
if self.dbg:
logging.info(f"Using dbg prmopt {self.dbg}")
return [self.dbg]
prompt_list = []
if self.original_flag:
if self.revised_original_flag and 'revised_original' in PROMPT_DICT[task_name]:
prompt_list += PROMPT_DICT[task_name]['revised_original']
else:
prompt_list += PROMPT_DICT[task_name]['original']
if self.revised_original_flag and 'revised_original' in PROMPT_DICT[dataset_name]:
prompt_list += PROMPT_DICT[dataset_name]['revised_original']
else:
prompt_list += PROMPT_DICT[dataset_name]['original']
if self.manual_flag:
if 'manual' in PROMPT_DICT[task_name]:
prompt_list += PROMPT_DICT[task_name]['manual']
if 'manual' in PROMPT_DICT[dataset_name]:
prompt_list += PROMPT_DICT[dataset_name]['manual']
if self.gpt3_flag:
if 'gpt3' in PROMPT_DICT[task_name]:
prompt_list += PROMPT_DICT[task_name]['gpt3']
if 'gpt3' in PROMPT_DICT[dataset_name]:
prompt_list += PROMPT_DICT[dataset_name]['gpt3']
if not prompt_list:
raise ValueError(f"No prompts for {task_name}/{dataset_name}")
if self.single_prompt:
logging.info(f"Using prompt \"{prompt_list[0]}\" for {task_name} {dataset_name}")
return prompt_list[:1]
return prompt_list
# Path: t5x/examples/unified_io/data/data_utils.py
def resize_and_pad(image, desired_output_size, masks=None, boxes=None, box_labels=None,
random_scale_min=0.1, random_scale_max=2.0, do_random_scale=False,
shrink_both_sides=True, filter_box=True,
desired_target_size=None, random_scale_ratio=0.0,
resize_method=tf.image.ResizeMethod.BILINEAR,
pad_value=0):
"""Resizes and pads an input image/video to `desired_output_size`
Supports random scaling augmentation if `do_random_scale` is True.
If `masks` or `boxes` are given, the same transformation that is applied to the image
is applied to them. Boxes can be completely removed if doing scaling augmentation, in which
case the deleted boxes will not be returned.
outputs:
image: The resized image/video
image_mask: A mask showing which pixels are padding in the output image
meta-data: Meta-data about the transformation and the boxes/masks that were also transformed
"""
desired_height, desired_width = desired_output_size
desired_height_f = tf.cast(desired_height, dtype=tf.float32)
desired_width_f = tf.cast(desired_width, dtype=tf.float32)
is_video = len(image.shape) == 4
if is_video:
height = tf.cast(tf.shape(image)[1], tf.float32)
width = tf.cast(tf.shape(image)[2], tf.float32)
else:
height = tf.cast(tf.shape(image)[0], tf.float32)
width = tf.cast(tf.shape(image)[1], tf.float32)
if boxes is not None:
# Converts boxes from normalized coordinates to pixel coordinates.
# Now the coordinates of boxes are w.r.t. the original image.
boxes = denormalize_boxes(boxes, [height, width])
if do_random_scale:
random_scale_factor = tf.random.uniform([], random_scale_min, random_scale_max)
if not shrink_both_sides:
# Max random is where scale * W > W_desired
# scale * H > H_desired
rsf_max = tf.maximum(desired_width_f / width, desired_height_f / height)
random_scale_factor = tf.minimum(rsf_max, random_scale_factor)
scaled_y = tf.cast(random_scale_factor * desired_height_f, tf.int32)
scaled_x = tf.cast(random_scale_factor * desired_width_f, tf.int32)
# Recompute the accurate scale_factor using rounded scaled image size.
image_scale_y = tf.cast(scaled_y, tf.float32) / height
image_scale_x = tf.cast(scaled_x, tf.float32) / width
image_scale = tf.cond(tf.less(
tf.random.uniform([], minval=0, maxval=1, dtype=tf.float32),
tf.cast(random_scale_ratio, tf.float32)),
lambda: tf.maximum(image_scale_x, image_scale_y),
lambda: tf.minimum(image_scale_x, image_scale_y))
# Don't scale any side lower than to 64
# For very wide images, this truncates the edge in order to keep the resolution
# reasonable
image_scale = tf.maximum(image_scale, 64.0 / tf.minimum(height, width))
# Select non-zero random offset (x, y) if scaled image is larger than
scaled_height = tf.cast(height * image_scale, tf.int32)
scaled_width = tf.cast(width * image_scale, tf.int32)
offset_y = tf.cast(scaled_height - desired_height, tf.float32)
offset_x = tf.cast(scaled_width - desired_width, tf.float32)
offset_y = tf.maximum(0.0, offset_y) * tf.random.uniform([], 0, 1)
offset_x = tf.maximum(0.0, offset_x) * tf.random.uniform([], 0, 1)
offset_y = tf.cast(offset_y, tf.int32)
offset_x = tf.cast(offset_x, tf.int32)
else:
image_scale_y = desired_height_f / height
image_scale_x = desired_width_f / width
image_scale = tf.minimum(image_scale_x, image_scale_y)
scaled_height = tf.cast(height * image_scale, tf.int32)
scaled_width = tf.cast(width * image_scale, tf.int32)
offset_y = tf.constant(0)
offset_x = tf.constant(0)
# Now resize and crop
if resize_method == 'random' and do_random_scale and (not tf.executing_eagerly()):
resize_methods = sorted([k for k in tf.image.ResizeMethod.__dict__.keys() if k.isupper()])
# print("Random resize method:\n{}".format(','.join(resize_methods)))
image = apply_with_random_selector(
image,
lambda x, method_idx: tf.image.resize(x, [scaled_height, scaled_width],
tf.image.ResizeMethod.__dict__[resize_methods[method_idx]],
antialias=True),
num_cases=len(resize_methods))
elif resize_method != 'random':
image = tf.image.resize(image, [scaled_height, scaled_width], method=resize_method, antialias=True)
else:
logging.info(f"you passed in {resize_method} but doing bilinear resize instead (possibly because eager is on or evaluation is on.)")
image = tf.image.resize(image, [scaled_height, scaled_width],
method=tf.image.ResizeMethod.BILINEAR, antialias=True)
image = tf.clip_by_value(image, 0.0, 1.0)
if is_video:
# frames x H x W x C
image = image[:,offset_y:offset_y + desired_height, offset_x:offset_x + desired_width, :]
H = tf.shape(image)[1]
W = tf.shape(image)[2]
else:
# H x W x C
image = image[offset_y:offset_y + desired_height, offset_x:offset_x + desired_width, :]
H = tf.shape(image)[0]
W = tf.shape(image)[1]
if config.PAD_ONE_SIDE:
top_pad = 0
left_pad = 0
else:
top_pad = (desired_height - H) // 2
left_pad = (desired_width - W) // 2
# Get the mask which indicates which regions were padded
mask = tf.ones(tf.concat([tf.shape(image)[:-1], [1]], 0), dtype=tf.int32)
image_mask = tf.squeeze(pad_to_bounding_box(
mask, top_pad, left_pad, desired_height, desired_width), -1)
image = pad_to_bounding_box(image, top_pad, left_pad, desired_height, desired_width,
value=pad_value)
if is_video:
image.set_shape([None, desired_height, desired_width, 3])
else:
image.set_shape([desired_height, desired_width, 3])
if masks is not None and tf.size(masks) != 0:
masks = tf.image.resize(
masks, [scaled_height, scaled_width],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
if len(masks.shape) == 3:
masks = masks[offset_y:offset_y + desired_height, offset_x:offset_x + desired_width]
else:
masks = masks[:, offset_y:offset_y + desired_height, offset_x:offset_x + desired_width]
masks = pad_to_bounding_box(masks, top_pad, left_pad, desired_height, desired_width)
masks = tf.image.resize(masks, desired_target_size,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
indices = None
if boxes is not None:
boxes = resize_and_crop_boxes(
boxes,
tf.stack([image_scale, image_scale]),
[desired_height, desired_width],
tf.cast(tf.stack([offset_y, offset_x]), dtype=tf.float32),
tf.cast(tf.stack([top_pad, left_pad]), dtype=tf.float32))
if filter_box:
indices = get_non_empty_box_indices(boxes)
else:
indices = tf.range(tf.shape(boxes)[0])
boxes = tf.gather(boxes, indices)
if box_labels is not None:
box_labels = tf.gather(box_labels, indices)
# Stores meta meta-data about how the image was resized, needed if we want
# reverse the padding/resizing later
image_info = tf.stack([
tf.cast(top_pad, tf.float32),
tf.cast(left_pad, tf.float32),
1.0 / image_scale,
height,
width,
tf.cast(offset_y, dtype=tf.float32) / height,
tf.cast(offset_x, dtype=tf.float32) / width,
tf.cast(offset_y, dtype=tf.float32),
tf.cast(offset_x, dtype=tf.float32),
tf.cast(scaled_height, dtype=tf.float32),
tf.cast(scaled_width, dtype=tf.float32),
])
outputs = (image_info, masks, boxes, box_labels, indices)
return image, image_mask, outputs
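# Minimal usage sketch for resize_and_pad (not from the repo): resize a single frame
# deterministically; the shapes and sizes below are made up, and in the pipeline the
# output size normally comes from config.IMAGE_INPUT_SIZE.
import tensorflow as tf
frame = tf.zeros([480, 640, 3], dtype=tf.float32)  # H x W x C with values in [0, 1]
image, image_mask, (image_info, masks, boxes, labels, indices) = resize_and_pad(
    frame, (384, 384), do_random_scale=False)
# image: the 384x384x3 resized-and-padded frame
# image_mask: 1 for real pixels, 0 where padding was added
# image_info: the scale/offset metadata needed to undo the transform later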
# Path: t5x/examples/unified_io/data/data_utils.py
def resize_and_pad_default(
image, is_training, is_input=True, masks=None, boxes=None, box_labels=None,
random_scale_min=None, random_scale_max=None, random_scale_ratio=None,
resize_method=None, is_history=False
):
"""Apply `resize_and_pad` with default settings"""
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if random_scale_min is None:
random_scale_min = config.RANDOM_SCALE_MIN
if random_scale_max is None:
random_scale_max = config.RANDOM_SCALE_MAX
if random_scale_ratio is None:
random_scale_ratio = config.RANDOM_SCALE_RATIO
if resize_method is None:
resize_method ='random' if is_training else tf.image.ResizeMethod.BILINEAR
if len(image.shape) == 4 or is_history:
assert is_input
output_size = config.IMAGE_HISTORY_INPUT_SIZE
else:
output_size = config.IMAGE_INPUT_SIZE if is_input else config.IMAGE_TARGET_SIZE
return resize_and_pad(
image, output_size,
masks, boxes, box_labels,
random_scale_min=random_scale_min,
random_scale_max=random_scale_max,
do_random_scale=is_training,
random_scale_ratio=random_scale_ratio,
resize_method=resize_method
)
# Path: t5x/examples/unified_io/data/data_utils.py
def random_element(vec, seed=None):
if isinstance(vec, list):
if len(vec) == 1:
return vec[0]
assert len(vec) > 0
vec = tf.constant(vec)
if seed is not None:
ix = tf.random.stateless_uniform((), seed, 0, tf.shape(vec)[0], tf.int32)
else:
ix = tf.random.uniform((), 0, tf.shape(vec)[0], tf.int32)
return vec[ix]
# Path: t5x/examples/unified_io/data/data_utils.py
def convert_bboxes_to_str(
boxes,
labels=None,
image_size=config.IMAGE_INPUT_SIZE[0],
convert_to_str=True,
shuffle=True,
seperator=" "
):
"""Converts a sequence of bound boxes into a sequence of location tokens"""
if shuffle:
# shuffle the labels, ids.
indices = tf.range(start=0, limit=tf.shape(boxes)[0], dtype=tf.int32)
shuffled_indices = tf.random.shuffle(indices)
if labels is not None:
labels = tf.gather(labels, shuffled_indices)
boxes = tf.gather(boxes, shuffled_indices)
boxes_str = values_to_tokens(tf.cast(boxes, tf.float32)/image_size)
if labels is not None:
labels_str = tf.expand_dims(labels, axis=-1)
boxes_str = tf.concat([boxes_str, labels_str], axis=-1)
if convert_to_str:
boxes_str = tf.strings.reduce_join(boxes_str, separator=' ', axis=-1)
boxes_str = tf.strings.reduce_join(boxes_str, separator=seperator)
return boxes_str
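# Illustrative call (not from the repo): two invented pixel-space boxes with labels.
# The exact output tokens depend on values_to_tokens and the location-bin vocabulary.
import tensorflow as tf
boxes = tf.constant([[10., 20., 100., 200.],
                     [50., 60., 300., 360.]])
labels = tf.constant(["cat", "dog"])
boxes_str = convert_bboxes_to_str(boxes, labels=labels, image_size=384, shuffle=False)
# -> one string of location tokens, each box followed by its label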
# Path: t5x/examples/unified_io/data/data_utils.py
def valid_regex_replace(prompt, replacements, allow_missing=False):
"""Replaces occurrences of keys in `replacements` in `prompt` with the values
Assumes each key occurs only once."""
if allow_missing:
for c, value in replacements.items():
res = tf.strings.split(prompt, c)
if tf.shape(res) == 2:
prompt = tf.strings.join([res[0], value, res[1]])
return prompt
else:
for c, value in replacements.items():
# We avoid regex_replace since it has issues if the replacement has
# backslashes that apparently can't be avoided
res = tf.strings.split(prompt, c)
tf.assert_equal(tf.shape(res), 2, message="prompt substitution error")
prompt = tf.strings.join([res[0], value, res[1]])
return prompt
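# Small usage sketch (the placeholder key below is invented; real keys come from the prompt templates):
import tensorflow as tf
prompt = tf.constant("What is in the region {#BOX} ?")
filled = valid_regex_replace(prompt, {"{#BOX}": tf.constant("<loc_10> <loc_20>")})
# filled == "What is in the region <loc_10> <loc_20> ?"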
# Path: t5x/examples/unified_io/data/data_utils.py
def load_class_names(path, cache={}) -> List[str]:
if path not in cache:
cache[path] = _load_class_name(path)
return cache[path]
# Path: t5x/examples/unified_io/config.py
MULTITASK_TFDS_DATA_DIR = None
LLAMA_TOKENIZER_PATH = None
SHUFFLE_BUFFER_SIZE = [100, 200, 200, 200]
CYCLE_LENGTH = [1, 2, 2, 2]
BLOCK_LENGTH = [1, 2, 2, 2]
VOCAB_START = 200
NUM_DETECTION_BIN = 1000
POS_MAX_VALUE = 50
POS_MIN_VALUE = -50
D_THETA_MAX_VALUE = math.pi
D_THETA_MIN_VALUE = -math.pi
D_RADIUS_MAX_VALUE = 0.7
D_RADIUS_MIN_VALUE = -0.7
D_SINUSOID_MAX_VALUE = 1.0
D_SINUSOID_MIN_VALUE = -1.0
RANDOM_SCALE_MAX = 1.3333
RANDOM_SCALE_MIN = 0.75
RANDOM_SCALE_RATIO = 0.50
IMAGE_INPUT_SIZE = [384, 384]
IMAGE_INPUT_D = 16
IMAGE_INPUT_PATCHES = (IMAGE_INPUT_SIZE[0] // IMAGE_INPUT_D, IMAGE_INPUT_SIZE[1] // IMAGE_INPUT_D)
IMAGE_HISTORY_INPUT_SIZE = [256, 256]
IMAGE_HISTORY_INPUT_D = 16
IMAGE_VIT_MEAN = [0.48145466, 0.4578275, 0.40821073]
IMAGE_VIT_STD = [0.26862954, 0.26130258, 0.27577711]
IMAGE_TARGET_SIZE = [256, 256]
IMAGE_TARGET_D = 8
LOCATION_RANGE = [-0.1, 1.1]
DIMENSION_RANGE = [0, 6]
DEPTH_RANGE = [-0.001, 0.1]
ANGLE_RANGE = [0, 6.283185307179586]
TOKENIZER = "llama"
PAD_ONE_SIDE = False
AUDIO_INPUT_SIZE = [256, 128]
AUDIO_INPUT_D = 16
AUDIO_TARGET_SIZE = [256, 128]
AUDIO_TARGET_D = 8
AUDIO_HISTORY_INPUT_SIZE = [256, 128]
AUDIO_HISTORY_INPUT_D = 16
AUDIO_SEGMENT_LENGTH = 4.08
AUDIO_SAMPLING_RATE = 16000
AUDIOSET_MEAN = -5.0945
AUDIOSET_STD = 3.8312
AUDIO_VIT_MEAN = -4.26
AUDIO_VIT_STD = 9.14
HUMAN_POSE_PART = [
"nose", "left eye", "right eye", "left ear", "right ear", "left shoulder",
"right shoulder", "left elbow", "right elbow", "left wrist", "right wrist",
"left hip", "right hip", "left knee", "right knee", "left ankle", "right ankle"]
class T5Config:
class VAEConfig:
class ImageVitFeatureConfig:
class AudioVitFeatureConfig:
class ImageResamplerConfig:
class AudioResamplerConfig:
class ImageViTVQGANConfig:
class AudioViTVQGANConfig:
class Config:
# Path: t5x/examples/unified_io/data/prompt_dict.py
PROMPT_DICT = defaultdict(lambda: dict(original=[], manual=[], gpt3=[], template=[]))
ENTAILMENT_LABELS = ['entailment', 'neutral', 'contradiction']
TRUNCATE = "[TR]"
IMAGE_GENERATION_NOISY = [
"Generate an image that roughly matches this text: {}",
"Build an image related to: \"{}\"",
"Build an image that could match \"{}\"",
"Description: {}\nInstruction: Build an image that roughly matches the description",
"Draw something related to this text: {}",
"Construct an image that is associated with \"{}\"",
'What is an image that is associated with the text "{}"?',
"Text: {}\nPlease build an image related to the text.",
"{}\nConstruct a related picture.",
"{}\nCan you draw a related image?",
"\"{}\" could be associated with what image?",
]
WEB_IMAGE_GENERATION = dict(
original=["Generate an image that might belong to a webpage that includes this text: {}"],
manual=IMAGE_GENERATION_NOISY + [
"Generate an image that might belong to a webpage that contained this text: {}",
"Imagine a webpage that includes the text \"{}\", draw a related image that might be on that page.",
"Generate an image that could match the alt-text: {}",
"For the alt-text: {}, generate an image that could match it.",
]
)
VIMA_ACTION_DESC = """\
as a sequence of sentences with the format: \
"step: start-action from ( a1 b1 r1 ) and end-action to ( a2 b2 r2 )"\
"""
POINTNAV_TARGET_DESC = """\
{#OBS}with the discrete action space "{#action_space}"\
"""
POINTNAV_OBS_DESC = """\
based on agent's histories with the format \
"observation ( x z y ) action"\
"""
POINTNAV_DESC = f"{POINTNAV_TARGET_DESC} {POINTNAV_OBS_DESC}"
NEXT_ACTION_PROMPT = [
f"Foresee the next action",
f"Anticipate the following action",
f"Compute the subsequent action",
f"Estimate the next action",
f"Propose the next move",
f"Predict the following action",
f"Calculate the next action",
f"Forecast the next action",
f"Conjecture the subsequent action",
f"Project the next move",
f"Estimate the forthcoming action",
f"Assess the next action",
f"Surmise the next action",
f"Calculate the following move",
f"Predict the subsequent action",
f"Determine the next action",
f"Approximate the next move",
f"Guess the following action",
f"Formulate the next action",
]
class NlpPrompt:
def _original(question, options, one_sentence=False):
# Path: t5x/examples/unified_io/data/preprocessing.py
from functools import reduce
from typing import Dict, List, Any
from t5x.examples.unified_io.data import data_utils
from t5x.examples.unified_io.data.prompt_definition import Prompt
from t5x.examples.unified_io.data.data_utils import resize_and_pad, resize_and_pad_default, \
random_element, convert_bboxes_to_str, valid_regex_replace, load_class_names
from t5x.examples.unified_io import config
from t5x.examples.unified_io.data.prompt_dict import Image_Generation_No_Text
import gin
import tensorflow as tf
import seqio
import numpy as np
"""Preprocessing functions used in seqio.Tasks"""
@seqio.utils.map_over_dataset
def rekey(x: Dict[str, Any], key_map: Dict[str, List]):
"""Get elements from possibly nested dict `x` according to the mapping in `key_map`.
Args:
x: an example to process
key_map: dictionary mapping new keys to original keys
Returns:
A preprocessed example with the format listed above.
"""
def _get(data, keys):
return reduce(dict.get, keys, data)
return {
new_key: _get(x, old_key) if old_key else ''
for new_key, old_key in key_map.items()
}
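# Hedged usage sketch for rekey (feature names and nesting below are invented):
_example_ds = tf.data.Dataset.from_tensors({"image": tf.zeros([4, 4, 3]),
                                            "meta": {"caption": tf.constant("a cat")}})
_example_ds = rekey(_example_ds, key_map={"image": ["image"], "text": ["meta", "caption"]})
# every element of _example_ds now exposes flat "image" and "text" features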
def flatten_parts(ds: tf.data.Dataset, parts: List[str], add_index=False) -> tf.data.Dataset:
"""Flatten `ds` so that the features in `parts` are flattened (meaning each slice of those
features becomes an individual example) and the other features in ds are duplicated"""
def _flatten(ex):
flat_key = {k: ex[k] for k in parts}
if add_index:
flat_key['index'] = tf.range(len(ex[parts[0]]))
| flat_ds = tf.data.Dataset.from_tensor_slices(flat_key) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zju3dv/EasyVolcap
# Path: easyvolcap/utils/base_utils.py
class dotdict(dict, Dict[KT, VT]):
"""
This is the default data passing object used throughout the codebase
Main function: dot access for dict values & dict like merging and updates
a dictionary that supports dot notation
as well as dictionary access notation
usage: d = make_dotdict() or d = make_dotdict({'val1': 'first'})
set attributes: d.val2 = 'second' or d['val2'] = 'second'
get attributes: d.val2 or d['val2']
"""
def update(self, dct: Dict = None, **kwargs):
dct = copy(dct) # avoid modifying the original dict, use super's copy to avoid recursion
# Handle different arguments
if dct is None:
dct = kwargs
elif isinstance(dct, Mapping):
dct.update(kwargs)
else:
super().update(dct, **kwargs)
return
# Recursive updates
for k, v in dct.items():
if k in self:
# Handle type conversions
target_type = type(self[k])
if not isinstance(v, target_type):
# NOTE: bool('False') will be True
if target_type == bool and isinstance(v, str):
dct[k] = v == 'True'
else:
dct[k] = target_type(v)
if isinstance(v, dict):
self[k].update(v) # recursion from here
else:
self[k] = v
else:
if isinstance(v, dict):
self[k] = dotdict(v) # recursion?
else:
self[k] = v
return self
def __init__(self, *args, **kwargs):
self.update(*args, **kwargs)
copy = return_dotdict(dict.copy)
fromkeys = return_dotdict(dict.fromkeys)
# def __hash__(self):
# # return hash(''.join([str(self.values().__hash__())]))
# return super(dotdict, self).__hash__()
# def __init__(self, *args, **kwargs):
# super(dotdict, self).__init__(*args, **kwargs)
"""
Uncomment following lines and
comment out __getattr__ = dict.__getitem__ to get feature:
returns empty numpy array for undefined keys, so that you can easily copy things around
TODO: potential caveat, harder to trace where this is set to np.array([], dtype=np.float32)
"""
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError as e:
raise AttributeError(e)
# MARK: Might encounter exception in newer version of pytorch
# Traceback (most recent call last):
# File "/home/xuzhen/miniconda3/envs/torch/lib/python3.9/multiprocessing/queues.py", line 245, in _feed
# obj = _ForkingPickler.dumps(obj)
# File "/home/xuzhen/miniconda3/envs/torch/lib/python3.9/multiprocessing/reduction.py", line 51, in dumps
# cls(buf, protocol).dump(obj)
# KeyError: '__getstate__'
# MARK: Because you allow your __getattr__() implementation to raise the wrong kind of exception.
# FIXME: not working typing hinting code
__getattr__: Callable[..., 'torch.Tensor'] = __getitem__ # type: ignore # overridden dict.__getitem__
__getattribute__: Callable[..., 'torch.Tensor'] # type: ignore
# __getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
# TODO: better ways to programmatically define these special variables?
@property
def meta(self) -> dotdict:
# Special variable used for storing cpu tensor in batch
if 'meta' not in self:
self.meta = dotdict()
return self.__getitem__('meta')
@meta.setter
def meta(self, meta):
self.__setitem__('meta', meta)
@property
def output(self) -> dotdict: # late annotation needed for this
# Special entry for storing output tensor in batch
if 'output' not in self:
self.output = dotdict()
return self.__getitem__('output')
@output.setter
def output(self, output):
self.__setitem__('output', output)
@property
def persistent(self) -> dotdict: # late annotation needed for this
# Special entry for storing persistent tensor in batch
if 'persistent' not in self:
self.persistent = dotdict()
return self.__getitem__('persistent')
@persistent.setter
def persistent(self, persistent):
self.__setitem__('persistent', persistent)
@property
def type(self) -> str: # late annotation needed for this
# Special entry for type based construction system
return self.__getitem__('type')
@type.setter
def type(self, type):
self.__setitem__('type', type)
def to_dict(self):
out = dict()
for k, v in self.items():
if isinstance(v, dotdict):
v = v.to_dict() # recursion point
out[k] = v
return out
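# Tiny usage sketch of dotdict (values are made up, not from the repo):
batch = dotdict()
batch.meta = dotdict(H=512, W=512)   # dot access creates/updates entries
batch.update({'meta': {'H': 256}})   # recursive update keeps meta.W intact
assert batch.meta.H == 256 and batch['meta'].W == 512
plain = batch.to_dict()              # back to a vanilla nested dict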
# Path: easyvolcap/utils/easy_utils.py
def write_camera(cameras: dict, path: str, intri_path: str = '', extri_path: str = ''):
from os.path import join
os.makedirs(path, exist_ok=True)
if not intri_path or not extri_path:
intri_name = join(path, 'intri.yml') # TODO: make them arguments
extri_name = join(path, 'extri.yml')
intri = FileStorage(intri_name, True)
extri = FileStorage(extri_name, True)
cam_names = [key_.split('.')[0] for key_ in cameras.keys()]
intri.write('names', cam_names, 'list')
extri.write('names', cam_names, 'list')
cameras = dotdict(cameras)
for key_, val in cameras.items():
# Skip special keys
if key_ == 'basenames': continue
# if key_ == 'avg_R': continue
# if key_ == 'avg_T': continue
key = key_.split('.')[0]
# Intrinsics
intri.write('K_{}'.format(key), val.K)
if 'H' in val: intri.write('H_{}'.format(key), val.H, 'real')
if 'W' in val: intri.write('W_{}'.format(key), val.W, 'real')
# Distortion
if 'D' not in val:
if 'dist' in val: val.D = val.dist
else: val.D = np.zeros((5, 1))
intri.write('D_{}'.format(key), val.D.reshape(5, 1))
# Extrinsics
if 'R' not in val: val.R = cv2.Rodrigues(val.Rvec)[0]
if 'Rvec' not in val: val.Rvec = cv2.Rodrigues(val.R)[0]
extri.write('R_{}'.format(key), val.Rvec)
extri.write('Rot_{}'.format(key), val.R)
extri.write('T_{}'.format(key), val.T.reshape(3, 1))
# Temporal
if 't' in val: extri.write('t_{}'.format(key), val.t, 'real')
# Bounds
if 'n' in val: extri.write('n_{}'.format(key), val.n, 'real')
if 'f' in val: extri.write('f_{}'.format(key), val.f, 'real')
if 'bounds' in val: extri.write('bounds_{}'.format(key), val.bounds)
# Color correction matrix
if 'ccm' in val: intri.write('ccm_{}'.format(key), val.ccm)
# # Averaged camera matrix (optional)
# if 'c2w_avg' in cameras:
# cameras.avg_R = cameras.c2w_avg[:3, :3]
# cameras.avg_T = cameras.c2w_avg[:3, 3:]
# if 'avg_R' in cameras and 'avg_T' in cameras:
# extri.write('avg_R'.format(key), cameras.avg_R)
# extri.write('avg_T'.format(key), cameras.avg_T.reshape(3, 1))
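# Sketch: write a one-camera intri.yml/extri.yml pair (all values below are placeholders):
import numpy as np
cams = {'00': dotdict(K=np.eye(3), R=np.eye(3), T=np.zeros((3, 1)),
                      H=1080, W=1920, D=np.zeros((5, 1)))}
write_camera(cams, 'data/example/calib')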
# Path: easyvolcap/utils/parallel_utils.py
def parallel_execution(*args, action: Callable, num_workers=32, print_progress=False, sequential=False, async_return=False, desc=None, use_process=False, **kwargs):
"""
Executes a given function in parallel using threads or processes.
When using threads, the parallelism is achieved during IO blocking (i.e. when loading images from disk or writing something to disk).
If your task is compute intensive, consider using packages like numpy or torch since they release the GIL during heavy lifting.
Args:
*args: Variable length argument list.
action (Callable): The function to execute in parallel.
num_workers (int): The number of worker threads or processes to use.
print_progress (bool): Whether to print a progress bar.
sequential (bool): Whether to execute the function sequentially instead of in parallel.
async_return (bool): Whether to return a pool object for asynchronous results.
desc (str): The description to use for the progress bar.
use_process (bool): Whether to use processes instead of threads.
**kwargs: Arbitrary keyword arguments.
Returns:
If `async_return` is False, returns a list of the results of executing the function on each input argument.
If `async_return` is True, returns a pool object for asynchronous results.
"""
# https://superfastpython.com/threadpool-python/
# Python threads are well suited for use with IO-bound tasks
# MARK: DO NOT USE THIS FOR CPU BOUND TASK. THIS IS A CHEAP "THREAD" POOL WHICH SUCCUMBS TO PYTHON GIL
# MARK: USE POOL INSTEAD OF THREAD POOL IF THAT IS THE CASE
# NOTE: we expect first arg / or kwargs to be distributed
# NOTE: print_progress arg is reserved
def get_length(args: List, kwargs: Dict):
for a in args:
if isinstance(a, list):
return len(a)
for v in kwargs.values():
if isinstance(v, list):
return len(v)
raise NotImplementedError
def get_action_args(length: int, args: List, kwargs: Dict, i: int):
action_args = [(arg[i] if isinstance(arg, list) and len(arg) == length else arg) for arg in args]
# TODO: Support all types of iterable
action_kwargs = {key: (kwargs[key][i] if isinstance(kwargs[key], list) and len(kwargs[key]) == length else kwargs[key]) for key in kwargs}
return action_args, action_kwargs
if not sequential:
# Create ThreadPool
if use_process:
pool = Pool(processes=num_workers)
else:
pool = ThreadPool(processes=num_workers)
# Spawn threads
results = []
asyncs = []
length = get_length(args, kwargs)
for i in range(length):
action_args, action_kwargs = get_action_args(length, args, kwargs, i)
async_result = pool.apply_async(action, action_args, action_kwargs)
asyncs.append(async_result)
# Join threads and get return values
if not async_return:
for async_result in tqdm(asyncs, back=3, desc=desc, disable=not print_progress): # log previous frame
results.append(async_result.get()) # will sync the corresponding thread
pool.close()
pool.join()
return results
else:
return pool
else:
results = []
length = get_length(args, kwargs)
for i in tqdm(range(length), back=3, desc=desc, disable=not print_progress): # log previous frame
action_args, action_kwargs = get_action_args(length, args, kwargs, i)
async_result = action(*action_args, **action_kwargs)
results.append(async_result)
return results
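# Hedged usage sketch: distribute an IO-bound function over a list of paths with
# threads (the file names below are placeholders).
def load_bytes(path, prefix=''):
    with open(prefix + path, 'rb') as f:
        return f.read()
blobs = parallel_execution(
    ['a.bin', 'b.bin', 'c.bin'],  # list arguments are split element-wise
    prefix='data/',               # non-list arguments are passed to every call
    action=load_bytes,
    num_workers=8,
    print_progress=True,
)
# blobs is a list with one return value per input path, in order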
# Path: easyvolcap/utils/data_utils.py
def read_cam_file(filename):
with open(filename) as f:
lines = [line.rstrip() for line in f.readlines()]
# extrinsics: line [1,5), 4x4 matrix
extrinsics = np.fromstring(' '.join(lines[1:5]), dtype=np.float32, sep=' ')
extrinsics = extrinsics.reshape((4, 4))
# intrinsics: line [7-10), 3x3 matrix
intrinsics = np.fromstring(' '.join(lines[7:10]), dtype=np.float32, sep=' ')
intrinsics = intrinsics.reshape((3, 3))
# depth_min & depth_interval: line 11
depth_min = float(lines[11].split()[0])
return intrinsics, extrinsics, depth_min
# Path: easyvolcap/utils/data_utils.py
def read_pfm(filename):
file = open(filename, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().decode('utf-8').rstrip()
if header == 'PF':
color = True
elif header == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8'))
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
file.close()
return data, scale
# Path: scripts/preprocess/dnerf_synthetic_to_easyvolcap.py
import os
import cv2
import json
import argparse
import operator
import numpy as np
import imageio.v2 as imageio
import sys
from os.path import join, exists
from easyvolcap.utils.console_utils import *
from easyvolcap.utils.base_utils import dotdict
from easyvolcap.utils.easy_utils import write_camera
from easyvolcap.utils.parallel_utils import parallel_execution
from easyvolcap.utils.data_utils import read_cam_file, read_pfm
sys.path.append('.')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--dnerf_root', type=str, default='')
parser.add_argument('--easyvolcap_root', type=str, default='data/dnerf')
parser.add_argument('--black_bkgds', action='store_true')
args = parser.parse_args()
dnerf_root = args.dnerf_root
easyvolcap_root = args.easyvolcap_root
def process_camera_image(dnerf_path, easyvolcap_path, split, frames, camera_angle_x, H, W):
# Define and create output image path and mask path
img_out_dir = join(easyvolcap_path, split, f'images', '00')
msk_out_dir = join(easyvolcap_path, split, f'masks', '00')
os.makedirs(img_out_dir, exist_ok=True)
os.makedirs(msk_out_dir, exist_ok=True)
cameras = dotdict()
# Remove frames with the same timestamp
for cnt, frame in enumerate(frames):
# Create soft link for image
img_dnerf_path = join(dnerf_path, frame['file_path'][2:] + '.png')
img_easyvolcap_path = join(img_out_dir, f'{cnt:03d}.png')
img = imageio.imread(img_dnerf_path) / 255.0
if args.black_bkgds: img = img[..., :3]
else: img = img[..., :3] * img[..., -1:] + (1 - img[..., -1:])
imageio.imwrite(img_easyvolcap_path, (img * 255.0).astype(np.uint8))
# Create mask for the image
msk = imageio.imread(img_dnerf_path).sum(axis=-1) > 0
msk = msk.astype(np.uint8) * 255
cv2.imwrite(join(msk_out_dir, f'{cnt:03d}.png'), msk)
# Fetch and store camera parameters
c2w_opengl = np.array(frame['transform_matrix']).astype(np.float32)
c2w_opencv = c2w_opengl @ np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
w2c_opencv = np.linalg.inv(c2w_opencv)
cameras[f'{cnt:03d}'] = {
'R': w2c_opencv[:3, :3],
| 'T': w2c_opencv[:3, 3:], |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: minghanqin/LangSplat
# Path: scene/colmap_loader.py
def read_extrinsics_text(path):
"""
Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
"""
images = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
image_id = int(elems[0])
qvec = np.array(tuple(map(float, elems[1:5])))
tvec = np.array(tuple(map(float, elems[5:8])))
camera_id = int(elems[8])
image_name = elems[9]
elems = fid.readline().split()
xys = np.column_stack([tuple(map(float, elems[0::3])),
tuple(map(float, elems[1::3]))])
point3D_ids = np.array(tuple(map(int, elems[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
# Path: scene/colmap_loader.py
def read_intrinsics_text(path):
"""
Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
"""
cameras = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
camera_id = int(elems[0])
model = elems[1]
assert model == "PINHOLE", "While the loader support other types, the rest of the code assumes PINHOLE"
width = int(elems[2])
height = int(elems[3])
params = np.array(tuple(map(float, elems[4:])))
cameras[camera_id] = Camera(id=camera_id, model=model,
width=width, height=height,
params=params)
return cameras
# Path: scene/colmap_loader.py
def qvec2rotmat(qvec):
return np.array([
[1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,
2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],
[2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,
2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],
[2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])
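# Quick illustrative check: the identity quaternion (w, x, y, z) = (1, 0, 0, 0)
# maps to the identity rotation matrix.
import numpy as np
assert np.allclose(qvec2rotmat(np.array([1.0, 0.0, 0.0, 0.0])), np.eye(3))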
# Path: scene/colmap_loader.py
def read_extrinsics_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadImagesBinary(const std::string& path)
void Reconstruction::WriteImagesBinary(const std::string& path)
"""
images = {}
with open(path_to_model_file, "rb") as fid:
num_reg_images = read_next_bytes(fid, 8, "Q")[0]
for _ in range(num_reg_images):
binary_image_properties = read_next_bytes(
fid, num_bytes=64, format_char_sequence="idddddddi")
image_id = binary_image_properties[0]
qvec = np.array(binary_image_properties[1:5])
tvec = np.array(binary_image_properties[5:8])
camera_id = binary_image_properties[8]
image_name = ""
current_char = read_next_bytes(fid, 1, "c")[0]
while current_char != b"\x00": # look for the ASCII 0 entry
image_name += current_char.decode("utf-8")
current_char = read_next_bytes(fid, 1, "c")[0]
num_points2D = read_next_bytes(fid, num_bytes=8,
format_char_sequence="Q")[0]
x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
format_char_sequence="ddq"*num_points2D)
xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
tuple(map(float, x_y_id_s[1::3]))])
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
# Path: scene/colmap_loader.py
def read_intrinsics_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::WriteCamerasBinary(const std::string& path)
void Reconstruction::ReadCamerasBinary(const std::string& path)
"""
cameras = {}
with open(path_to_model_file, "rb") as fid:
num_cameras = read_next_bytes(fid, 8, "Q")[0]
for _ in range(num_cameras):
camera_properties = read_next_bytes(
fid, num_bytes=24, format_char_sequence="iiQQ")
camera_id = camera_properties[0]
model_id = camera_properties[1]
model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
width = camera_properties[2]
height = camera_properties[3]
num_params = CAMERA_MODEL_IDS[model_id].num_params
params = read_next_bytes(fid, num_bytes=8*num_params,
format_char_sequence="d"*num_params)
cameras[camera_id] = Camera(id=camera_id,
model=model_name,
width=width,
height=height,
params=np.array(params))
assert len(cameras) == num_cameras
return cameras
# Path: scene/colmap_loader.py
def read_points3D_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DBinary(const std::string& path)
void Reconstruction::WritePoints3DBinary(const std::string& path)
"""
with open(path_to_model_file, "rb") as fid:
num_points = read_next_bytes(fid, 8, "Q")[0]
xyzs = np.empty((num_points, 3))
rgbs = np.empty((num_points, 3))
errors = np.empty((num_points, 1))
for p_id in range(num_points):
binary_point_line_properties = read_next_bytes(
fid, num_bytes=43, format_char_sequence="QdddBBBd")
xyz = np.array(binary_point_line_properties[1:4])
rgb = np.array(binary_point_line_properties[4:7])
error = np.array(binary_point_line_properties[7])
track_length = read_next_bytes(
fid, num_bytes=8, format_char_sequence="Q")[0]
track_elems = read_next_bytes(
fid, num_bytes=8*track_length,
format_char_sequence="ii"*track_length)
xyzs[p_id] = xyz
rgbs[p_id] = rgb
errors[p_id] = error
return xyzs, rgbs, errors
# Path: scene/colmap_loader.py
def read_points3D_text(path):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DText(const std::string& path)
void Reconstruction::WritePoints3DText(const std::string& path)
"""
xyzs = None
rgbs = None
errors = None
num_points = 0
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
num_points += 1
xyzs = np.empty((num_points, 3))
rgbs = np.empty((num_points, 3))
errors = np.empty((num_points, 1))
count = 0
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
xyz = np.array(tuple(map(float, elems[1:4])))
rgb = np.array(tuple(map(int, elems[4:7])))
error = np.array(float(elems[7]))
xyzs[count] = xyz
rgbs[count] = rgb
errors[count] = error
count += 1
return xyzs, rgbs, errors
# Path: utils/graphics_utils.py
def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):
Rt = np.zeros((4, 4))
Rt[:3, :3] = R.transpose()
Rt[:3, 3] = t
Rt[3, 3] = 1.0
C2W = np.linalg.inv(Rt)
cam_center = C2W[:3, 3]
cam_center = (cam_center + translate) * scale
C2W[:3, 3] = cam_center
Rt = np.linalg.inv(C2W)
return np.float32(Rt)
# Path: utils/graphics_utils.py
def focal2fov(focal, pixels):
return 2*math.atan(pixels/(2*focal))
# Path: utils/graphics_utils.py
def fov2focal(fov, pixels):
return pixels / (2 * math.tan(fov / 2))
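# Illustrative round trip between focal length and field of view (numbers are made up):
f = 1111.0                   # focal length in pixels
fov = focal2fov(f, 800)      # for an 800-pixel image dimension
assert abs(fov2focal(fov, 800) - f) < 1e-6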
# Path: utils/sh_utils.py
def SH2RGB(sh):
return sh * C0 + 0.5
# Path: scene/gaussian_model.py
class GaussianModel:
def setup_functions(self):
def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
def __init__(self, sh_degree : int):
def capture(self, include_feature=False):
def restore(self, model_args, training_args, mode='train'):
def get_scaling(self):
def get_rotation(self):
def get_xyz(self):
def get_features(self):
def get_opacity(self):
def get_language_feature(self):
def get_covariance(self, scaling_modifier = 1):
def oneupSHdegree(self):
def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):
def training_setup(self, training_args):
def update_learning_rate(self, iteration):
def construct_list_of_attributes(self):
def save_ply(self, path):
def reset_opacity(self):
def load_ply(self, path):
def replace_tensor_to_optimizer(self, tensor, name):
def _prune_optimizer(self, mask):
def prune_points(self, mask):
def cat_tensors_to_optimizer(self, tensors_dict):
def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):
def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):
def densify_and_clone(self, grads, grad_threshold, scene_extent):
def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):
def add_densification_stats(self, viewspace_point_tensor, update_filter):
L = build_scaling_rotation(scaling_modifier * scaling, rotation)
# Path: scene/dataset_readers.py
import os
import sys
import numpy as np
import json
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud
#
class CameraInfo(NamedTuple):
uid: int
R: np.array
T: np.array
FovY: np.array
FovX: np.array
image: np.array
image_path: str
image_name: str
width: int
height: int
class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
train_cameras: list
test_cameras: list
nerf_normalization: dict
ply_path: str
def getNerfppNorm(cam_info):
def get_center_and_diag(cam_centers):
cam_centers = np.hstack(cam_centers)
avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
center = avg_cam_center
dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
diagonal = np.max(dist)
return center.flatten(), diagonal
cam_centers = []
for cam in cam_info:
W2C = getWorld2View2(cam.R, cam.T)
C2W = np.linalg.inv(W2C)
cam_centers.append(C2W[:3, 3:4])
center, diagonal = get_center_and_diag(cam_centers)
radius = diagonal * 1.1
translate = -center
return {"translate": translate, "radius": radius}
def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
cam_infos = []
for idx, key in enumerate(cam_extrinsics):
sys.stdout.write('\r')
# the exact output you're looking for:
sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
sys.stdout.flush()
extr = cam_extrinsics[key]
intr = cam_intrinsics[extr.camera_id]
height = intr.height
width = intr.width
uid = intr.id
R = np.transpose(qvec2rotmat(extr.qvec))
T = np.array(extr.tvec)
if intr.model=="SIMPLE_PINHOLE":
focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height)
FovX = focal2fov(focal_length_x, width)
elif intr.model=="PINHOLE":
focal_length_x = intr.params[0]
focal_length_y = intr.params[1]
FovY = focal2fov(focal_length_y, height)
FovX = focal2fov(focal_length_x, width)
elif intr.model=="SIMPLE_RADIAL":
focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height)
FovX = focal2fov(focal_length_x, width)
else:
assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
image_path = os.path.join(images_folder, os.path.basename(extr.name))
image_name = os.path.basename(image_path).split(".")[0]
image = Image.open(image_path)
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, width=width, height=height)
cam_infos.append(cam_info)
sys.stdout.write('\n')
return cam_infos
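# Worked example of the focal-to-FOV conversion used above (a sketch: focal2fov is imported from
# utils.graphics_utils and is assumed to implement the standard pinhole relation fov = 2 * atan(pixels / (2 * focal))):
#   fx, width = 1111.0, 800                      # illustrative PINHOLE intrinsics
#   FovX = 2 * math.atan(width / (2 * fx))       # ~0.691 rad (~39.6 degrees)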
def fetchPly(path):
plydata = PlyData.read(path)
vertices = plydata['vertex']
positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T
return BasicPointCloud(points=positions, colors=colors, normals=normals)
def storePly(path, xyz, rgb):
# Define the dtype for the structured array
dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
normals = np.zeros_like(xyz)
elements = np.empty(xyz.shape[0], dtype=dtype)
attributes = np.concatenate((xyz, normals, rgb), axis=1)
elements[:] = list(map(tuple, attributes))
# Create the PlyData object and write to file
vertex_element = PlyElement.describe(elements, 'vertex')
ply_data = PlyData([vertex_element])
ply_data.write(path)
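# A minimal round-trip sketch for the two helpers above (illustrative only; the output path is hypothetical):
if __name__ == "__main__":
    _xyz = np.random.rand(100, 3).astype(np.float32)
    _rgb = (np.random.rand(100, 3) * 255).astype(np.uint8)
    storePly("/tmp/points3d.ply", _xyz, _rgb)   # writes positions, zero normals and uint8 colors
    _pcd = fetchPly("/tmp/points3d.ply")        # returns a BasicPointCloud with colors rescaled to [0, 1]
    assert _pcd.points.shape == (100, 3)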
def readColmapSceneInfo(path, images, eval, llffhold=8):
try:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
except:
| cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: alibaba/animate-anything
# Path: models/unet_3d_blocks.py
class CrossAttnDownBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
cross_attention_dim=1280,
output_scale_factor=1.0,
downsample_padding=1,
add_downsample=True,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
):
super().__init__()
resnets = []
attentions = []
temp_attentions = []
temp_convs = []
self.gradient_checkpointing = False
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
temp_convs.append(
TemporalConvLayer(
out_channels,
out_channels,
dropout=0.1
)
)
attentions.append(
Transformer2DModel(
out_channels // attn_num_head_channels,
attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
)
)
temp_attentions.append(
TransformerTemporalModel(
out_channels // attn_num_head_channels,
attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
)
)
self.resnets = nn.ModuleList(resnets)
self.temp_convs = nn.ModuleList(temp_convs)
self.attentions = nn.ModuleList(attentions)
self.temp_attentions = nn.ModuleList(temp_attentions)
if add_downsample:
self.downsamplers = nn.ModuleList(
[
Downsample2D(
out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
)
]
)
else:
self.downsamplers = None
def forward(
self,
hidden_states,
temb=None,
encoder_hidden_states=None,
attention_mask=None,
num_frames=1,
cross_attention_kwargs=None,
):
# TODO(Patrick, William) - attention mask is not used
output_states = ()
for resnet, temp_conv, attn, temp_attn in zip(
self.resnets, self.temp_convs, self.attentions, self.temp_attentions
):
if self.gradient_checkpointing:
hidden_states = cross_attn_g_c(
attn,
temp_attn,
resnet,
temp_conv,
hidden_states,
encoder_hidden_states,
cross_attention_kwargs,
temb,
num_frames,
inverse_temp=True
)
else:
hidden_states = resnet(hidden_states, temb)
if num_frames > 1:
hidden_states = temp_conv(hidden_states, num_frames=num_frames)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
).sample
if num_frames > 1:
hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
# Path: models/unet_3d_blocks.py
class CrossAttnUpBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
prev_output_channel: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
cross_attention_dim=1280,
output_scale_factor=1.0,
add_upsample=True,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
):
super().__init__()
resnets = []
temp_convs = []
attentions = []
temp_attentions = []
self.gradient_checkpointing = False
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
temp_convs.append(
TemporalConvLayer(
out_channels,
out_channels,
dropout=0.1
)
)
attentions.append(
Transformer2DModel(
out_channels // attn_num_head_channels,
attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
)
)
temp_attentions.append(
TransformerTemporalModel(
out_channels // attn_num_head_channels,
attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
)
)
self.resnets = nn.ModuleList(resnets)
self.temp_convs = nn.ModuleList(temp_convs)
self.attentions = nn.ModuleList(attentions)
self.temp_attentions = nn.ModuleList(temp_attentions)
if add_upsample:
self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
else:
self.upsamplers = None
def forward(
self,
hidden_states,
res_hidden_states_tuple,
temb=None,
encoder_hidden_states=None,
upsample_size=None,
attention_mask=None,
num_frames=1,
cross_attention_kwargs=None,
):
# TODO(Patrick, William) - attention mask is not used
for resnet, temp_conv, attn, temp_attn in zip(
self.resnets, self.temp_convs, self.attentions, self.temp_attentions
):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if self.gradient_checkpointing:
hidden_states = cross_attn_g_c(
attn,
temp_attn,
resnet,
temp_conv,
hidden_states,
encoder_hidden_states,
cross_attention_kwargs,
temb,
num_frames,
inverse_temp=True
)
else:
hidden_states = resnet(hidden_states, temb)
if num_frames > 1:
hidden_states = temp_conv(hidden_states, num_frames=num_frames)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
).sample
if num_frames > 1:
hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size)
return hidden_states
# Path: models/unet_3d_blocks.py
class DownBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
output_scale_factor=1.0,
add_downsample=True,
downsample_padding=1,
):
super().__init__()
resnets = []
temp_convs = []
self.gradient_checkpointing = False
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
temp_convs.append(
TemporalConvLayer(
out_channels,
out_channels,
dropout=0.1
)
)
self.resnets = nn.ModuleList(resnets)
self.temp_convs = nn.ModuleList(temp_convs)
if add_downsample:
self.downsamplers = nn.ModuleList(
[
Downsample2D(
out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
)
]
)
else:
self.downsamplers = None
def forward(self, hidden_states, temb=None, num_frames=1):
output_states = ()
for resnet, temp_conv in zip(self.resnets, self.temp_convs):
if self.gradient_checkpointing:
hidden_states = up_down_g_c(resnet, temp_conv, hidden_states, temb, num_frames)
else:
hidden_states = resnet(hidden_states, temb)
if num_frames > 1:
hidden_states = temp_conv(hidden_states, num_frames=num_frames)
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
# Path: models/unet_3d_blocks.py
class UNetMidBlock3DCrossAttn(nn.Module):
def __init__(
self,
in_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
output_scale_factor=1.0,
cross_attention_dim=1280,
dual_cross_attention=False,
use_linear_projection=True,
upcast_attention=False,
):
super().__init__()
self.gradient_checkpointing = False
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
# there is always at least one resnet
resnets = [
ResnetBlock2D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
]
temp_convs = [
TemporalConvLayer(
in_channels,
in_channels,
dropout=0.1
)
]
attentions = []
temp_attentions = []
for _ in range(num_layers):
attentions.append(
Transformer2DModel(
in_channels // attn_num_head_channels,
attn_num_head_channels,
in_channels=in_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
)
)
temp_attentions.append(
TransformerTemporalModel(
in_channels // attn_num_head_channels,
attn_num_head_channels,
in_channels=in_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
)
)
resnets.append(
ResnetBlock2D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
temp_convs.append(
TemporalConvLayer(
in_channels,
in_channels,
dropout=0.1
)
)
self.resnets = nn.ModuleList(resnets)
self.temp_convs = nn.ModuleList(temp_convs)
self.attentions = nn.ModuleList(attentions)
self.temp_attentions = nn.ModuleList(temp_attentions)
def forward(
self,
hidden_states,
temb=None,
encoder_hidden_states=None,
attention_mask=None,
num_frames=1,
cross_attention_kwargs=None,
):
if self.gradient_checkpointing:
hidden_states = up_down_g_c(
self.resnets[0],
self.temp_convs[0],
hidden_states,
temb,
num_frames
)
else:
hidden_states = self.resnets[0](hidden_states, temb)
hidden_states = self.temp_convs[0](hidden_states, num_frames=num_frames)
for attn, temp_attn, resnet, temp_conv in zip(
self.attentions, self.temp_attentions, self.resnets[1:], self.temp_convs[1:]
):
if self.gradient_checkpointing:
hidden_states = cross_attn_g_c(
attn,
temp_attn,
resnet,
temp_conv,
hidden_states,
encoder_hidden_states,
cross_attention_kwargs,
temb,
num_frames
)
else:
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
).sample
if num_frames > 1:
hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample
hidden_states = resnet(hidden_states, temb)
if num_frames > 1:
hidden_states = temp_conv(hidden_states, num_frames=num_frames)
return hidden_states
# Path: models/unet_3d_blocks.py
class UpBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
prev_output_channel: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
output_scale_factor=1.0,
add_upsample=True,
):
super().__init__()
resnets = []
temp_convs = []
self.gradient_checkpointing = False
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
temp_convs.append(
TemporalConvLayer(
out_channels,
out_channels,
dropout=0.1
)
)
self.resnets = nn.ModuleList(resnets)
self.temp_convs = nn.ModuleList(temp_convs)
if add_upsample:
self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
else:
self.upsamplers = None
def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, num_frames=1):
for resnet, temp_conv in zip(self.resnets, self.temp_convs):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if self.gradient_checkpointing:
hidden_states = up_down_g_c(resnet, temp_conv, hidden_states, temb, num_frames)
else:
hidden_states = resnet(hidden_states, temb)
if num_frames > 1:
hidden_states = temp_conv(hidden_states, num_frames=num_frames)
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size)
return hidden_states
# Path: models/unet_3d_blocks.py
def get_down_block(
down_block_type,
num_layers,
in_channels,
out_channels,
temb_channels,
add_downsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
downsample_padding=None,
dual_cross_attention=False,
use_linear_projection=True,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
):
if down_block_type == "DownBlock3D":
return DownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
resnet_time_scale_shift=resnet_time_scale_shift,
)
elif down_block_type == "CrossAttnDownBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D")
return CrossAttnDownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
)
raise ValueError(f"{down_block_type} does not exist.")
# Path: models/unet_3d_blocks.py
def get_up_block(
up_block_type,
num_layers,
in_channels,
out_channels,
prev_output_channel,
temb_channels,
add_upsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
dual_cross_attention=False,
use_linear_projection=True,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
):
if up_block_type == "UpBlock3D":
return UpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
resnet_time_scale_shift=resnet_time_scale_shift,
)
elif up_block_type == "CrossAttnUpBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
return CrossAttnUpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
)
raise ValueError(f"{up_block_type} does not exist.")
# Path: models/unet_3d_blocks.py
def transformer_g_c(transformer, sample, num_frames):
sample = g_c(custom_checkpoint(transformer, mode='temp'),
sample, num_frames, use_reentrant=False
)['sample']
return sample
# Path: models/unet_3d_condition_mask.py
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.transformer_temporal import TransformerTemporalModel
from einops import rearrange, repeat
from .unet_3d_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
transformer_g_c
)
import torch
import torch.nn as nn
import torch.utils.checkpoint
# Copyright 2023 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved.
# Copyright 2023 The ModelScope Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
"""
Args:
sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: torch.FloatTensor
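# Hedged usage sketch (names are illustrative; the forward pass producing this output is defined later in the module):
#   out = unet(noisy_latents, timesteps, encoder_hidden_states=text_embeddings)
#   video_latents = out.sample   # (batch_size, num_frames, num_channels, height, width) per the docstring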
class UNet3DConditionModel(ModelMixin, ConfigMixin):
r"""
UNet3DConditionModel is a conditional 3D UNet model that takes in a noisy sample, conditional state, and a timestep
and returns sample shaped output.
This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
implements for all the models (such as downloading or saving, etc.)
Parameters:
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
Height and width of input/output sample.
in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.
out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D")`):
The tuple of downsample blocks to use.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D",)`):
The tuple of upsample blocks to use.
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
| The tuple of output channels for each block. |
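# Hedged construction sketch based on the parameter list documented above (argument names follow the
# docstring; the exact constructor signature of this fork may differ, so treat this purely as illustration):
#   unet = UNet3DConditionModel(
#       sample_size=32,
#       in_channels=4,
#       out_channels=4,
#       down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
#       up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
#       block_out_channels=(320, 640, 1280, 1280),
#   )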
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: yohanshin/WHAM
# Path: configs/constants.py
IMG_FEAT_DIM = {
'resnet': 2048,
'vit': 1024
}
N_JOINTS = 17
PARSED_DATA = f'{root}/parsed_data'
THREEDPW_PTH = f'{root}/3DPW'
RICH_PTH = f'{root}/RICH'
EMDB_PTH = f'{root}/EMDB'
NUM_JOINTS = N_JOINTS
H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]
H36M_TO_J14 = H36M_TO_J17[:14]
J17_TO_H36M = [14, 3, 4, 5, 2, 1, 0, 15, 12, 16, 13, 9, 10, 11, 8, 7, 6]
COCO_AUG_DICT = f'{root}/body_models/coco_aug_dict.pth'
TREE = [[5, 6], 0, 0, 1, 2, -1, -1, 5, 6, 7, 8, -1, -1, 11, 12, 13, 14, 15, 15, 15, 16, 16, 16]
S_BIAS = 1e-1
S_JITTERING = 5e-2
S_PEAK = 3e-1
S_PEAK_MASK = 5e-3
S_MASK = 0.03
MAIN_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] # reduced_joints
FLDR = f'{root}/body_models/smpl/'
SMPLX2SMPL = f'{root}/body_models/smplx2smpl.pkl'
FACES = f'{root}/body_models/smpl_faces.npy'
MEAN_PARAMS = f'{root}/body_models/smpl_mean_params.npz'
JOINTS_REGRESSOR_WHAM = f'{root}/body_models/J_regressor_wham.npy'
JOINTS_REGRESSOR_H36M = f'{root}/body_models/J_regressor_h36m.npy'
JOINTS_REGRESSOR_EXTRA = f'{root}/body_models/J_regressor_extra.npy'
JOINTS_REGRESSOR_FEET = f'{root}/body_models/J_regressor_feet.npy'
PARENTS = torch.tensor([
-1, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12, 13, 14, 16, 17, 18, 19, 20, 21])
class PATHS:
class KEYPOINTS:
class BMODEL:
# Path: lib/utils/transforms.py
def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor:
def _copysign(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:
def _axis_angle_rotation(axis: str, angle: torch.Tensor) -> torch.Tensor:
def euler_angles_to_matrix(euler_angles: torch.Tensor, convention: str) -> torch.Tensor:
def _angle_from_tan(
axis: str, other_axis: str, data, horizontal: bool, tait_bryan: bool
) -> torch.Tensor:
def _index_from_letter(letter: str) -> int:
def matrix_to_euler_angles(matrix: torch.Tensor, convention: str) -> torch.Tensor:
def random_quaternions(
n: int, dtype: Optional[torch.dtype] = None, device: Optional[Device] = None
) -> torch.Tensor:
def random_rotations(
n: int, dtype: Optional[torch.dtype] = None, device: Optional[Device] = None
) -> torch.Tensor:
def random_rotation(
dtype: Optional[torch.dtype] = None, device: Optional[Device] = None
) -> torch.Tensor:
def standardize_quaternion(quaternions: torch.Tensor) -> torch.Tensor:
def quaternion_raw_multiply(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
def quaternion_multiply(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
def quaternion_invert(quaternion: torch.Tensor) -> torch.Tensor:
def quaternion_apply(quaternion: torch.Tensor, point: torch.Tensor) -> torch.Tensor:
def axis_angle_to_matrix(axis_angle: torch.Tensor) -> torch.Tensor:
def matrix_to_axis_angle(matrix: torch.Tensor) -> torch.Tensor:
def axis_angle_to_quaternion(axis_angle: torch.Tensor) -> torch.Tensor:
def quaternion_to_axis_angle(quaternions: torch.Tensor) -> torch.Tensor:
def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor:
def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor:
def clean_rotation_6d(d6d: torch.Tensor) -> torch.Tensor:
def rot6d_to_rotmat(x):
def rotmat_to_rot6d(x):
def convert_rotation_matrix_to_homogeneous(rotation_matrix):
def rotation_matrix_to_angle_axis(rotation_matrix):
def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):
def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
# Path: lib/models/layers/modules.py
class MotionEncoder(nn.Module):
def __init__(self,
in_dim,
d_embed,
pose_dr,
rnn_type,
n_layers,
n_joints):
super().__init__()
self.n_joints = n_joints
self.embed_layer = nn.Linear(in_dim, d_embed)
self.pos_drop = nn.Dropout(pose_dr)
# Keypoints initializer
self.neural_init = NeuralInitialization(n_joints * 3 + in_dim, d_embed, rnn_type, n_layers)
# 3d keypoints regressor
self.regressor = Regressor(
d_embed, d_embed, [n_joints * 3], n_joints * 3, rnn_type, n_layers)
def forward(self, x, init):
""" Forward pass of motion encoder.
"""
self.b, self.f = x.shape[:2]
x = self.embed_layer(x.reshape(self.b, self.f, -1))
x = self.pos_drop(x)
h0 = self.neural_init(init)
pred_list = [init[..., :self.n_joints * 3]]
motion_context_list = []
for i in range(self.f):
(pred_kp3d, ), motion_context, h0 = self.regressor(x[:, [i]], pred_list[-1:], h0)
motion_context_list.append(motion_context)
pred_list.append(pred_kp3d)
pred_kp3d = torch.cat(pred_list[1:], dim=1).view(self.b, self.f, -1, 3)
motion_context = torch.cat(motion_context_list, dim=1)
# Merge 3D keypoints with motion context
motion_context = torch.cat((motion_context, pred_kp3d.reshape(self.b, self.f, -1)), dim=-1)
return pred_kp3d, motion_context
# Path: lib/models/layers/modules.py
class MotionDecoder(nn.Module):
def __init__(self,
d_embed,
rnn_type,
n_layers):
super().__init__()
self.n_pose = 24
# SMPL pose initialization
self.neural_init = NeuralInitialization(len(_C.BMODEL.MAIN_JOINTS) * 6, d_embed, rnn_type, n_layers)
# 3d keypoints regressor
self.regressor = Regressor(
d_embed, d_embed, [self.n_pose * 6, 10, 3, 4], self.n_pose * 6, rnn_type, n_layers)
def forward(self, x, init):
""" Forward pass of motion decoder.
"""
b, f = x.shape[:2]
h0 = self.neural_init(init[:, :, _C.BMODEL.MAIN_JOINTS].reshape(b, 1, -1))
# Recursive prediction of SMPL parameters
pred_pose_list = [init.reshape(b, 1, -1)]
pred_shape_list, pred_cam_list, pred_contact_list = [], [], []
for i in range(f):
# Camera coordinate estimation
(pred_pose, pred_shape, pred_cam, pred_contact), _, h0 = self.regressor(x[:, [i]], pred_pose_list[-1:], h0)
pred_pose_list.append(pred_pose)
pred_shape_list.append(pred_shape)
pred_cam_list.append(pred_cam)
pred_contact_list.append(pred_contact)
pred_pose = torch.cat(pred_pose_list[1:], dim=1).view(b, f, -1)
pred_shape = torch.cat(pred_shape_list, dim=1).view(b, f, -1)
pred_cam = torch.cat(pred_cam_list, dim=1).view(b, f, -1)
pred_contact = torch.cat(pred_contact_list, dim=1).view(b, f, -1)
return pred_pose, pred_shape, pred_cam, pred_contact
# Path: lib/models/layers/modules.py
class TrajectoryDecoder(nn.Module):
def __init__(self,
d_embed,
rnn_type,
n_layers):
super().__init__()
# Trajectory regressor
self.regressor = Regressor(
d_embed, d_embed, [3, 6], 12, rnn_type, n_layers, )
def forward(self, x, root, cam_a, h0=None):
""" Forward pass of trajectory decoder.
"""
b, f = x.shape[:2]
pred_root_list, pred_vel_list = [root[:, :1]], []
for i in range(f):
# Global coordinate estimation
(pred_rootv, pred_rootr), _, h0 = self.regressor(
x[:, [i]], [pred_root_list[-1], cam_a[:, [i]]], h0)
pred_root_list.append(pred_rootr)
pred_vel_list.append(pred_rootv)
pred_root = torch.cat(pred_root_list, dim=1).view(b, f + 1, -1)
pred_vel = torch.cat(pred_vel_list, dim=1).view(b, f, -1)
return pred_root, pred_vel
# Path: lib/models/layers/modules.py
class TrajectoryRefiner(nn.Module):
def __init__(self,
d_embed,
d_hidden,
rnn_type,
n_layers):
super().__init__()
d_input = d_embed + 12
self.refiner = Regressor(
d_input, d_hidden, [6, 3], 9, rnn_type, n_layers)
def forward(self, context, pred_vel, output, cam_angvel, return_y_up):
b, f = context.shape[:2]
# Register values
pred_pose = output['pose'].clone().detach()
pred_root = output['poses_root_r6d'].clone().detach()
feet = output['feet'].clone().detach()
contact = output['contact'].clone().detach()
feet_vel = torch.cat((torch.zeros_like(feet[:, :1]), feet[:, 1:] - feet[:, :-1]), dim=1) * 30 # Normalize to 30 times
feet = (feet_vel * contact.unsqueeze(-1)).reshape(b, f, -1) # Velocity input
inpt_feat = torch.cat([context, feet], dim=-1)
(delta_root, delta_vel), _, _ = self.refiner(inpt_feat, [pred_root[:, 1:], pred_vel], h0=None)
pred_root[:, 1:] = pred_root[:, 1:] + delta_root
pred_vel = pred_vel + delta_vel
root_world, trans_world = rollout_global_motion(pred_root, pred_vel)
if return_y_up:
yup2ydown = axis_angle_to_matrix(torch.tensor([[np.pi, 0, 0]])).float().to(root_world.device)
root_world = yup2ydown.mT @ root_world
trans_world = (yup2ydown.mT @ trans_world.unsqueeze(-1)).squeeze(-1)
output.update({
'poses_root_r6d_refined': pred_root,
'vel_root_refined': pred_vel,
'poses_root_world': root_world,
'trans_world': trans_world,
})
return output
# Path: lib/models/layers/modules.py
class Integrator(nn.Module):
def __init__(self, in_channel, out_channel, hid_channel=1024):
super().__init__()
self.layer1 = nn.Linear(in_channel, hid_channel)
self.relu1 = nn.ReLU()
self.dr1 = nn.Dropout(0.1)
self.layer2 = nn.Linear(hid_channel, hid_channel)
self.relu2 = nn.ReLU()
self.dr2 = nn.Dropout(0.1)
self.layer3 = nn.Linear(hid_channel, out_channel)
def forward(self, x, feat):
res = x
mask = (feat != 0).all(dim=-1).all(dim=-1)
out = torch.cat((x, feat), dim=-1)
out = self.layer1(out)
out = self.relu1(out)
out = self.dr1(out)
out = self.layer2(out)
out = self.relu2(out)
out = self.dr2(out)
out = self.layer3(out)
out[mask] = out[mask] + res[mask]
return out
# Path: lib/models/layers/utils.py
def rollout_global_motion(root_r, root_v, init_trans=None):
b, f = root_v.shape[:2]
root = transforms.rotation_6d_to_matrix(root_r[:])
vel_world = (root[:, :-1] @ root_v.unsqueeze(-1)).squeeze(-1)
trans = torch.cumsum(vel_world, dim=1)
if init_trans is not None: trans = trans + init_trans
return root[:, 1:], trans
# Path: lib/models/layers/utils.py
def compute_camera_pose(root_c_d6d, root_w):
root_c = transforms.rotation_6d_to_matrix(root_c_d6d) # Root orient in cam coord
cam_R = root_c @ root_w.mT
return cam_R
# Path: lib/models/layers/utils.py
def reset_root_velocity(smpl, output, stationary, pred_ori, pred_vel, thr=0.7):
b, f = pred_vel.shape[:2]
stationary_mask = (stationary.clone().detach() > thr).unsqueeze(-1).float()
poses_root = transforms.rotation_6d_to_matrix(pred_ori.clone().detach())
vel_world = (poses_root[:, 1:] @ pred_vel.clone().detach().unsqueeze(-1)).squeeze(-1)
output = smpl.get_output(body_pose=output.body_pose.clone().detach(),
global_orient=poses_root[:, 1:].reshape(-1, 1, 3, 3),
betas=output.betas.clone().detach(),
pose2rot=False)
feet = output.feet.reshape(b, f, 4, 3)
feet_vel = feet[:, 1:] - feet[:, :-1] + vel_world[:, 1:].unsqueeze(-2)
feet_vel = torch.cat((torch.zeros_like(feet_vel[:, :1]), feet_vel), dim=1)
stationary_vel = feet_vel * stationary_mask
del_vel = stationary_vel.sum(dim=2) / ((stationary_vel != 0).sum(dim=2) + 1e-4)
vel_world_update = vel_world - del_vel
vel_root = (poses_root[:, 1:].mT @ vel_world_update.unsqueeze(-1)).squeeze(-1)
return vel_root
# Path: lib/models/layers/utils.py
def compute_camera_motion(output, root_c_d6d, root_w, trans, pred_cam):
root_c = transforms.rotation_6d_to_matrix(root_c_d6d) # Root orient in cam coord
cam_R = root_c @ root_w.mT
pelvis_cam = output.full_cam.view_as(pred_cam)
pelvis_world = (cam_R.mT @ pelvis_cam.unsqueeze(-1)).squeeze(-1)
cam_T_world = pelvis_world - trans
cam_T = (cam_R @ cam_T_world.unsqueeze(-1)).squeeze(-1)
return cam_R, cam_T
# Path: lib/models/wham.py
import torch
from torch import nn
from configs import constants as _C
from lib.utils import transforms
from lib.models.layers import (MotionEncoder, MotionDecoder, TrajectoryDecoder, TrajectoryRefiner, Integrator,
rollout_global_motion, compute_camera_pose, reset_root_velocity, compute_camera_motion)
self.mask_embedding = nn.Parameter(torch.zeros(1, 1, n_joints, 2))
# Module 1. Motion Encoder
self.motion_encoder = MotionEncoder(in_dim=in_dim,
d_embed=d_embed,
pose_dr=pose_dr,
rnn_type=rnn_type,
n_layers=n_layers,
n_joints=n_joints)
self.trajectory_decoder = TrajectoryDecoder(d_embed=d_context,
rnn_type=rnn_type,
n_layers=n_layers)
# Module 3. Feature Integrator
self.integrator = Integrator(in_channel=d_feat + d_context,
out_channel=d_context)
# Module 4. Motion Decoder
self.motion_decoder = MotionDecoder(d_embed=d_context,
rnn_type=rnn_type,
n_layers=n_layers)
# Module 5. Trajectory Refiner
self.trajectory_refiner = TrajectoryRefiner(d_embed=d_context,
d_hidden=d_embed,
rnn_type=rnn_type,
n_layers=2)
@property
def __version__(self, ):
return 'v07'
def compute_global_feet(self, duplicate=False):
# Global motion
init_trans = None  # if self.training else self.output.full_cam.reshape(self.b, self.f, 3)[:, [0]]
root_world, trans = rollout_global_motion(self.pred_root, self.pred_vel, init_trans)
# # Compute world-coordinate motion
# if not duplicate:
# self.global_output = self.smpl.get_output(
# global_orient=root_world.reshape(self.b * self.f, 1, 3, 3), body_pose=self.output.body_pose,
# betas=self.output.betas, pose2rot=False
# )
# feet_world = self.global_output.feet.reshape(self.b, self.f, 4, 3) + trans.unsqueeze(-2)
cam_R, cam_T = compute_camera_motion(self.output, self.pred_pose[:, :, :6], root_world, trans, self.pred_cam)
feet_cam = self.output.feet.reshape(self.b, self.f, -1, 3) + self.output.full_cam.reshape(self.b, self.f, 1, 3)
feet_world = (cam_R.mT @ (feet_cam - cam_T.unsqueeze(-2)).mT).mT
return feet_world
def forward_smpl(self, **kwargs):
self.output = self.smpl(self.pred_pose,
self.pred_shape,
cam=self.pred_cam,
return_full_pose=not self.training,
**kwargs,
)
kp3d = self.output.joints
# Feet location in global coordinate
feet_world = self.compute_global_feet()
# Return output
output = {'feet': feet_world,
'contact': self.pred_contact,
'pose': self.pred_pose,
'betas': self.pred_shape,
'poses_root_cam': self.output.global_orient,
'verts_cam': self.output.vertices}
if self.training:
pass # TODO: Update training code
else:
pose = transforms.matrix_to_axis_angle(self.output.full_pose).reshape(-1, 72)
theta = torch.cat((self.output.full_cam, pose, self.pred_shape.squeeze(0)), dim=-1)
output.update({
'poses_root_r6d': self.pred_root,
'trans_cam': self.output.full_cam,
'poses_body': self.output.body_pose})
return output
def forward(self, x, inits, img_features=None, mask=None, init_root=None, cam_angvel=None,
cam_intrinsics=None, bbox=None, res=None, return_y_up=False, **kwargs):
self.b, self.f = x.shape[:2]
init_kp, init_smpl = inits
# Treat masked keypoints
mask_embedding = mask.unsqueeze(-1) * self.mask_embedding
_mask = mask.unsqueeze(-1).repeat(1, 1, 1, 2).reshape(self.b, self.f, -1)
_mask = torch.cat((_mask, torch.zeros_like(_mask[..., :3])), dim=-1)
_mask_embedding = mask_embedding.reshape(self.b, self.f, -1)
_mask_embedding = torch.cat((_mask_embedding, torch.zeros_like(_mask_embedding[..., :3])), dim=-1)
x[_mask] = 0.0
x = x + _mask_embedding
# --------- Inference --------- #
# Stage 1. Encode motion
pred_kp3d, motion_context = self.motion_encoder(x, init_kp)
old_motion_context = motion_context.detach().clone()
# Stage 2. Decode global trajectory
pred_root, pred_vel = self.trajectory_decoder(motion_context, init_root, cam_angvel)
# Stage 3. Integrate features
if img_features is not None and self.integrator is not None:
motion_context = self.integrator(motion_context, img_features)
# Stage 4. Decode SMPL motion
pred_pose, pred_shape, pred_cam, pred_contact = self.motion_decoder(motion_context, init_smpl)
# --------- #
# --------- Register predictions --------- #
self.pred_kp3d = pred_kp3d
self.pred_root = pred_root
self.pred_vel = pred_vel
self.pred_pose = pred_pose
| self.pred_shape = pred_shape |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: octo-models/octo
# Path: octo/model/octo_model.py
class OctoModel:
"""Recommended way of interacting with Octo models.
Usage for inference:
>>> model = OctoModel.load_pretrained(checkpoint_dir)
>>> tasks = model.create_tasks(texts=["go to the red room"])
>>> # or tasks = model.create_tasks(goals={"image_primary": goal_images})
>>> actions = model.sample_actions(observations, tasks, rng=jax.random.PRNGKey(0))
>>> # Note: these are normalized actions (processed to mean 0 and std 1). To get the raw actions,
# un-normalize them using model.dataset_statistics
Usage for finetuning:
>>> model = OctoModel.load_pretrained(checkpoint_dir)
>>> train_state = octo.utils.train_utils.TrainState.create(
rng=jax.random.PRNGKey(0),
model=model,
tx=optax.adamw(...)
)
>>> # access params through train_state.model.params
>>> train_state, metrics = your_update_function(train_state, batch)
>>> # when it's time to save (note that this only saves the model parameters,
>>> # not the full optimizer state)
>>> train_state.model.save_pretrained(step, save_dir)
Usage for pretraining:
>>> model = OctoModel.from_config(
config,
example_batch,
text_processor
) # initializes params
>>> # Continue as in finetuning example
See full usage examples in train.py and finetune.py.
"""
module: OctoModule = struct.field(pytree_node=False)
text_processor: TextProcessor = struct.field(pytree_node=False)
config: Config = struct.field(pytree_node=False)
params: Params
example_batch: Data
dataset_statistics: Optional[Data]
def create_tasks(
self, goals: Optional[Data] = None, texts: Optional[Sequence[str]] = None
):
"""Creates tasks dict from goals and texts.
Args:
goals: if not None, dict of arrays with shape (batch_size, *)
texts: if not None, list of texts of length batch_size
Omit images to run the language-conditioned model, and omit texts to run the
goal-conditioned model.
"""
assert goals is not None or texts is not None
tasks = {"pad_mask_dict": {}}
if goals is not None:
tasks.update(goals)
tasks["pad_mask_dict"].update(
{k: np.ones(v.shape[:1], dtype=bool) for k, v in goals.items()}
)
else:
batch_size = len(texts)
tasks.update(
{
k: np.zeros((batch_size, *v.shape[1:]), dtype=v.dtype)
for k, v in self.example_batch["task"].items()
if k not in ("pad_mask_dict", "language_instruction")
}
)
tasks["pad_mask_dict"].update(
{
k: np.zeros(batch_size, dtype=bool)
for k in tasks.keys()
if k != "pad_mask_dict"
}
)
if texts is not None:
assert self.text_processor is not None
tasks["language_instruction"] = texts
tasks["pad_mask_dict"]["language_instruction"] = np.ones(
len(texts), dtype=bool
)
else:
batch_size = jax.tree_leaves(goals)[0].shape[0]
tasks["language_instruction"] = [""] * batch_size
tasks["pad_mask_dict"]["language_instruction"] = np.zeros(
batch_size, dtype=bool
)
if self.text_processor is not None:
tasks["language_instruction"] = self.text_processor.encode(
tasks["language_instruction"]
)
else:
del tasks["language_instruction"]
_verify_shapes(tasks, "tasks", self.example_batch["task"], starting_dim=1)
return tasks
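# Hedged usage sketch (mirrors the class docstring; shapes and the task text are illustrative):
#   tasks = model.create_tasks(texts=["pick up the spoon"])              # language-conditioned
#   tasks = model.create_tasks(goals={"image_primary": goal_images})     # goal-conditioned, goal_images: (batch, H, W, 3)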
@partial(jax.jit, static_argnames=("train",))
def run_transformer(
self, observations: Data, tasks: Data, pad_mask: ArrayLike, train: bool = False
):
"""Runs the transformer, but does shape checking on the inputs.
Args:
observations: dictionary of arrays of shape (batch_size, window_size, *shape).
Shape must be consistent with self.example_batch["observation"]
tasks: dict of tasks of shape (batch_size, *shape)
Shape must be consistent with self.example_batch["task"]
pad_mask: (batch_size, window_size) Boolean mask that is False when the timestep corresponds to padding
train: whether to run in train mode
"""
_verify_shapes(
observations,
"observations",
self.example_batch["observation"],
starting_dim=2,
)
_verify_shapes(tasks, "tasks", self.example_batch["task"], starting_dim=1)
return self.module.apply(
{"params": self.params},
observations,
tasks,
pad_mask,
train=train,
method="octo_transformer",
)
@partial(jax.jit, static_argnames=("train", "sample_shape", "argmax"))
def sample_actions(
self,
observations: Data,
tasks: Data,
pad_mask: Optional[ArrayLike] = None,
train: bool = False,
argmax: bool = False,
sample_shape: Tuple[int, ...] = (),
rng: Optional[PRNGKey] = None,
temperature: float = 1.0,
):
"""Samples actions from the model. See `action_heads.py` for more info.
Args:
observations: dictionary of arrays of shape (batch_size, window_size, *)
tasks: dict of tasks of shape (batch_size, *)
pad_mask: (batch_size, window_size) Boolean mask that is False when the timestep corresponds to padding
train: whether to run in train mode
...see `action_heads.py` for the rest of the kwargs.
Returns:
actions: (*sample_shape, batch_size, pred_horizon, action_dim)
"""
if pad_mask is None:
pad_mask = observations["pad_mask"]
transformer_outputs = self.run_transformer(
observations, tasks, pad_mask, train=train
)
action_head: ActionHead = self.module.bind({"params": self.params}).heads[
"action"
]
return action_head.predict_action(
transformer_outputs,
train=train,
argmax=argmax,
sample_shape=sample_shape,
rng=rng,
temperature=temperature,
)
@classmethod
def load_pretrained(
cls,
checkpoint_path: str,
step: Optional[int] = None,
) -> "OctoModel":
"""Loads a model from a checkpoint that was saved via `save_pretrained`.
Args:
checkpoint_path (str): A path to either a directory of checkpoints or a single checkpoint.
step (int, optional): If multiple checkpoints are present, which one to load. Defaults to the latest.
"""
if checkpoint_path.startswith("hf://"):
if step:
raise ValueError(
"You can't set config['pretrained_step'] when loading from HuggingFace."
)
checkpoint_path = _download_from_huggingface(
checkpoint_path.removeprefix("hf://")
)
# load config
with tf.io.gfile.GFile(
tf.io.gfile.join(checkpoint_path, "config.json"), "r"
) as f:
config = json.load(f)
# load example batch
with tf.io.gfile.GFile(
tf.io.gfile.join(checkpoint_path, "example_batch.msgpack"), "rb"
) as f:
example_batch = flax.serialization.msgpack_restore(f.read())
# shim for migrating from "tasks" to "task"
if "tasks" in example_batch:
example_batch["task"] = example_batch.pop("tasks")
logging.debug(
"Model was trained with observations: %s",
flax.core.pretty_repr(
jax.tree_map(jnp.shape, example_batch["observation"])
),
)
logging.debug(
"Model was trained with tasks: %s",
flax.core.pretty_repr(jax.tree_map(jnp.shape, example_batch["task"])),
)
# load dataset statistics
with tf.io.gfile.GFile(
tf.io.gfile.join(checkpoint_path, "dataset_statistics.json"), "r"
) as f:
dataset_statistics = json.load(f)
dataset_statistics = jax.tree_map(
np.array, dataset_statistics, is_leaf=lambda x: not isinstance(x, dict)
)
# create model def (an OctoModule)
module = OctoModule.create(**config["model"])
# infer params shape without actually doing any computation
params_shape = jax.eval_shape(
partial(module.init, train=False),
jax.random.PRNGKey(0),
example_batch["observation"],
example_batch["task"],
example_batch["observation"]["pad_mask"],
)["params"]
# restore params, checking to make sure the shape matches
checkpointer = orbax.checkpoint.CheckpointManager(
checkpoint_path, orbax.checkpoint.PyTreeCheckpointer()
)
step = step if step is not None else checkpointer.latest_step()
params = checkpointer.restore(step, params_shape)
if config["text_processor"] is not None:
text_processor = ModuleSpec.instantiate(config["text_processor"])()
else:
text_processor = None
return cls(
module=module,
params=params,
text_processor=text_processor,
example_batch=example_batch,
config=config,
dataset_statistics=dataset_statistics,
)
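# Hedged usage sketch (both checkpoint locations are placeholders):
#   model = OctoModel.load_pretrained("hf://some-org/some-octo-checkpoint")    # resolved through HuggingFace
#   model = OctoModel.load_pretrained("/path/to/run_dir", step=100_000)        # load a specific saved step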
def save_pretrained(
self,
step: int,
checkpoint_path: Optional[str] = None,
checkpoint_manager: Optional[orbax.checkpoint.CheckpointManager] = None,
):
"""Saves a model, as well as corresponding metadata needed for `load_pretrained`. Takes either a
pre-existing checkpoint manager (which already knows where to save the checkpoint) or a path to a
directory to save the checkpoint to.
Args:
step (int): Step number.
checkpoint_path (str, optional): Path to save the checkpoint.
checkpoint_manager (optional): Checkpoint manager to save the checkpoint.
"""
if (checkpoint_path is None) == (checkpoint_manager is None):
raise ValueError(
"Must provide exactly one of checkpoint_path or checkpoint_manager."
)
if checkpoint_manager is None:
checkpoint_manager = orbax.checkpoint.CheckpointManager(
checkpoint_path, orbax.checkpoint.PyTreeCheckpointer()
)
if checkpoint_path is None:
checkpoint_path = str(checkpoint_manager._directory)
# save params
checkpoint_manager.save(
step,
self.params,
{"save_args": orbax_utils.save_args_from_target(self.params)},
)
if jax.process_index() == 0:
# save config
config_path = tf.io.gfile.join(checkpoint_path, "config.json")
if not tf.io.gfile.exists(config_path):
with tf.io.gfile.GFile(config_path, "w") as f:
json.dump(self.config, f)
# save example batch
example_batch_path = tf.io.gfile.join(
checkpoint_path, "example_batch.msgpack"
)
if not tf.io.gfile.exists(example_batch_path):
with tf.io.gfile.GFile(example_batch_path, "wb") as f:
f.write(flax.serialization.msgpack_serialize(self.example_batch))
# save dataset statistics
dataset_statistics_path = tf.io.gfile.join(
checkpoint_path, "dataset_statistics.json"
)
if not tf.io.gfile.exists(dataset_statistics_path):
with tf.io.gfile.GFile(dataset_statistics_path, "w") as f:
json.dump(
jax.tree_map(lambda x: x.tolist(), self.dataset_statistics),
f,
)
@classmethod
def from_config(
cls,
config: Config,
example_batch: Data,
text_processor: Optional[Any] = None,
verbose: bool = False,
rng: Optional[PRNGKey] = None,
dataset_statistics: Optional[Data] = None,
):
"""Initializes a model with a fresh set of weights from a given config + example_batch.
Args:
config (Dict[str, Any]): Config dict. The only required key is "model", but other configuration
may be saved for posterity.
example_batch (Dict[str, Any]): Example batch.
text_processor (Any, optional): Preprocessor for text inputs.
verbose (bool, optional): Whether to print out a summary of the model.
rng (Optional[PRNGKey], optional): RNG key for initializing the model.
dataset_statistics (Optional[Dict[str, Any]], optional): Dataset statistics.
"""
module = OctoModule.create(**config["model"])
rng = rng if rng is not None else jax.random.PRNGKey(0)
example_batch = multihost_utils.process_allgather(example_batch)
example_batch = jax.tree_map(lambda x: x[:1], example_batch)
init_args = (
example_batch["observation"],
example_batch["task"],
example_batch["observation"]["pad_mask"],
)
if verbose:
print(
module.tabulate(rng, *init_args, train=False, verbose=True, depth=2)
) # Prints out the parameter count of our model, and tokenizer details
@jax.jit
def _init(rng):
return module.init(rng, *init_args, train=False)
params = _init(rng)["params"]
return cls(
module=module,
params=params,
text_processor=text_processor,
example_batch=example_batch,
config=config,
dataset_statistics=dataset_statistics,
)
def get_pretty_spec(self):
"""Brief summary of the model's expected inputs and outputs."""
# TODO: generalize this to print out proprio when it is being tokenized
window_size = self.example_batch["observation"]["pad_mask"].shape[1]
observation_space = {
k: ("batch", "history_window", *v.shape[2:])
for k, v in self.example_batch["observation"].items()
if k.startswith("image")
}
task_space = {
k: ("batch", *v.shape[1:])
for k, v in self.example_batch["task"].items()
if k.startswith("image")
}
if self.text_processor is not None:
task_space["language_instruction"] = jax.tree_map(
lambda arr: ("batch", *arr.shape[1:]),
self.example_batch["task"]["language_instruction"],
)
try:
action_head = self.module.heads["action"]
action_head_repr = str(action_head.__class__)
action_dim, pred_horizon = action_head.action_dim, action_head.pred_horizon
except:
action_head_repr, action_dim, pred_horizon = "", None, None
return SPEC_TEMPLATE.format(
window_size=window_size,
observation_space=flax.core.pretty_repr(observation_space),
task_space=flax.core.pretty_repr(task_space),
action_head_repr=action_head_repr,
action_dim=action_dim,
pred_horizon=pred_horizon,
)
# Path: octo/utils/gym_wrappers.py
class HistoryWrapper(gym.Wrapper):
"""
Accumulates the observation history into `horizon` size chunks. If the length of the history
is less than the length of the horizon, we pad the history to the full horizon length.
A `pad_mask` key is added to the final observation dictionary that denotes which timesteps
are padding.
"""
def __init__(self, env: gym.Env, horizon: int):
super().__init__(env)
self.horizon = horizon
self.history = deque(maxlen=self.horizon)
self.num_obs = 0
self.observation_space = space_stack(self.env.observation_space, self.horizon)
def step(self, action):
obs, reward, done, trunc, info = self.env.step(action)
self.num_obs += 1
self.history.append(obs)
assert len(self.history) == self.horizon
full_obs = stack_and_pad(self.history, self.num_obs)
return full_obs, reward, done, trunc, info
def reset(self, **kwargs):
obs, info = self.env.reset(**kwargs)
self.num_obs = 1
self.history.extend([obs] * self.horizon)
full_obs = stack_and_pad(self.history, self.num_obs)
return full_obs, info
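# Minimal usage sketch (assumes `env` is an existing gym.Env; the horizon value is illustrative):
#   env = HistoryWrapper(env, horizon=2)
#   obs, info = env.reset()        # every observation entry now carries a leading history axis of length 2
#   obs["pad_mask"]                # per the docstring, marks which of the stacked timesteps are padding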
# Path: octo/utils/gym_wrappers.py
class RHCWrapper(gym.Wrapper):
"""
Performs receding horizon control. The policy returns `pred_horizon` actions and
we execute `exec_horizon` of them.
"""
def __init__(self, env: gym.Env, exec_horizon: int):
super().__init__(env)
self.exec_horizon = exec_horizon
def step(self, actions):
if self.exec_horizon == 1 and len(actions.shape) == 1:
actions = actions[None]
assert len(actions) >= self.exec_horizon
rewards = []
observations = []
infos = []
for i in range(self.exec_horizon):
obs, reward, done, trunc, info = self.env.step(actions[i])
observations.append(obs)
rewards.append(reward)
infos.append(info)
if done or trunc:
break
infos = listdict2dictlist(infos)
infos["rewards"] = rewards
infos["observations"] = observations
return obs, np.sum(rewards), done, trunc, infos
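# Minimal usage sketch (assumes `env` is an existing gym.Env and the policy predicts a chunk of actions):
#   env = RHCWrapper(env, exec_horizon=4)
#   obs, total_reward, done, trunc, infos = env.step(action_chunk)   # executes the first 4 predicted actions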
# Path: octo/utils/gym_wrappers.py
class UnnormalizeActionProprio(gym.ActionWrapper, gym.ObservationWrapper):
"""
Un-normalizes the action and proprio.
"""
def __init__(
self,
env: gym.Env,
action_proprio_metadata: dict,
normalization_type: str,
):
self.action_proprio_metadata = jax.tree_map(
lambda x: np.array(x),
action_proprio_metadata,
is_leaf=lambda x: isinstance(x, list),
)
self.normalization_type = normalization_type
super().__init__(env)
def unnormalize(self, data, metadata):
mask = metadata.get("mask", np.ones_like(metadata["mean"], dtype=bool))
if self.normalization_type == "normal":
return np.where(
mask,
(data * metadata["std"]) + metadata["mean"],
data,
)
elif self.normalization_type == "bounds":
return np.where(
mask,
((data + 1) / 2 * (metadata["max"] - metadata["min"] + 1e-8))
+ metadata["min"],
data,
)
else:
raise ValueError(
f"Unknown action/proprio normalization type: {self.normalization_type}"
)
def normalize(self, data, metadata):
mask = metadata.get("mask", np.ones_like(metadata["mean"], dtype=bool))
if self.normalization_type == "normal":
return np.where(
mask,
(data - metadata["mean"]) / (metadata["std"] + 1e-8),
data,
)
elif self.normalization_type == "bounds":
return np.where(
mask,
np.clip(
2
* (data - metadata["min"])
/ (metadata["max"] - metadata["min"] + 1e-8)
- 1,
-1,
1,
),
data,
)
else:
raise ValueError(
f"Unknown action/proprio normalization type: {self.normalization_type}"
)
def action(self, action):
return self.unnormalize(action, self.action_proprio_metadata["action"])
def observation(self, obs):
obs["proprio"] = self.normalize(
obs["proprio"], self.action_proprio_metadata["proprio"]
)
return obs
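# A tiny standalone check of the "normal" (mean/std) branch of unnormalize() above
# (the numbers are made up purely for illustration):
import numpy as np

_mean = np.array([0.10, -0.20])
_std = np.array([0.50, 0.25])
_normalized = np.array([1.0, -1.0])
_raw = _normalized * _std + _mean   # -> array([ 0.6 , -0.45]), matching the (data * std) + mean branch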
# Path: examples/04_eval_finetuned_on_robot.py
from datetime import datetime
from functools import partial
from absl import app, flags, logging
from envs.widowx_env import convert_obs, state_to_eep, wait_for_obs, WidowXGym
from widowx_envs.widowx_env_service import WidowXClient, WidowXConfigs, WidowXStatus
from octo.model.octo_model import OctoModel
from octo.utils.gym_wrappers import HistoryWrapper, RHCWrapper, UnnormalizeActionProprio
import os
import time
import click
import cv2
import imageio
import jax
import jax.numpy as jnp
import numpy as np
"""
This script shows how we evaluated a finetuned Octo model on a real WidowX robot. While the exact specifics may not
be applicable to your use case, this script serves as a didactic example of how to use Octo in a real-world setting.
If you wish, you may reproduce these results by [reproducing the robot setup](https://rail-berkeley.github.io/bridgedata/)
and installing [the robot controller](https://github.com/rail-berkeley/bridge_data_robot)
"""
np.set_printoptions(suppress=True)
logging.set_verbosity(logging.WARNING)
FLAGS = flags.FLAGS
flags.DEFINE_string(
"checkpoint_weights_path", None, "Path to checkpoint", required=True
)
flags.DEFINE_integer("checkpoint_step", None, "Checkpoint step", required=True)
# custom to bridge_data_robot
flags.DEFINE_string("ip", "localhost", "IP address of the robot")
flags.DEFINE_integer("port", 5556, "Port of the robot")
flags.DEFINE_spaceseplist("goal_eep", [0.3, 0.0, 0.15], "Goal position")
flags.DEFINE_spaceseplist("initial_eep", [0.3, 0.0, 0.15], "Initial position")
flags.DEFINE_bool("blocking", False, "Use the blocking controller")
flags.DEFINE_integer("im_size", None, "Image size", required=True)
flags.DEFINE_string("video_save_path", None, "Path to save video")
flags.DEFINE_integer("num_timesteps", 120, "num timesteps")
flags.DEFINE_integer("horizon", 1, "Observation history length")
flags.DEFINE_integer("pred_horizon", 1, "Length of action sequence from model")
flags.DEFINE_integer("exec_horizon", 1, "Length of action sequence to execute")
# show image flag
flags.DEFINE_bool("show_image", False, "Show image")
##############################################################################
STEP_DURATION_MESSAGE = """
Bridge data was collected with non-blocking control and a step duration of 0.2s.
However, we relabel the actions to make it look like the data was collected with
blocking control and we evaluate with blocking control.
| We also use a step duration of 0.4s to reduce the jerkiness of the policy. |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: LinShan-Bin/OccNeRF
# Path: utils/geom.py
def eye_4x4(B, device='cuda'):
def safe_inverse(a): #parallel version
def safe_inverse_single(a):
def apply_4x4(RT, xyz):
def get_camM_T_camXs(origin_T_camXs, ind=0):
def split_rt_single(rt):
def split_rt(rt):
def merge_rt(r, t):
def xyd2pointcloud(xyd, pix_T_cam):
def pixels2camera(x, y, z, fx, fy, x0, y0):
def camera2pixels(xyz, pix_T_cam):
def scale_intrinsics(K, sx, sy):
def split_intrinsics(K):
def merge_intrinsics(fx, fy, x0, y0):
def merge_rtlist(rlist, tlist):
def split_lrtlist(lrtlist):
def merge_lrtlist(lenlist, rtlist):
def apply_4x4_to_lrtlist(Y_T_X, lrtlist_X):
def apply_4x4_to_lrt(Y_T_X, lrt_X):
def get_xyzlist_from_lenlist(lenlist):
def get_xyzlist_from_lrtlist(lrtlist, include_clist=False):
def get_clist_from_lrtlist(lrtlist):
def wrap2pi(rad_angle):
def unproject(cam2world, intrinsic, depth):
def reproject(cam2world_src, cam2world_tar, W, H, intrinsic, depth_src, depth_tar, color_tar, mask_tar):
def make_grid(x, y):
def visualize_depth(depth, mask=None, depth_min=None, depth_max=None, direct=False):
def mat2pose_vec(matrix: torch.Tensor):
def square_distance(src, dst):
B, _, _ = list(a.shape)
B, N, _ = list(xyz.shape)
B, S = list(origin_T_camXs.shape)[0:2]
B, C, D = list(r.shape)
B2, D2 = list(t.shape)
B, N, C = list(xyd.shape)
B = x.shape[0]
B = list(z.shape)[0]
EPS = 1e-4
K = merge_intrinsics(fx, fy, x0, y0)
B = list(fx.shape)[0]
K = torch.zeros(B, 4, 4, dtype=torch.float32, device=fx.device)
K[:,0,0] = fx
K[:,1,1] = fy
K[:,0,2] = x0
K[:,1,2] = y0
K[:,2,2] = 1.0
K[:,3,3] = 1.0
B, N, D, E = list(rlist.shape)
B, N, F = list(tlist.shape)
B, N, D = list(lrtlist.shape)
B, N, D = list(lenlist.shape)
B2, N2, E, F = list(rtlist.shape)
B, N, D = list(lrtlist_X.shape)
B2, E, F = list(Y_T_X.shape)
B, D = list(lrt_X.shape)
B2, E, F = list(Y_T_X.shape)
B, N, D = list(lenlist.shape)
B, N, D = list(lrtlist.shape)
B, N, D = list(lrtlist.shape)
B, N, _ = src.shape
_, M, _ = dst.shape
# Path: utils/vox.py
def world2contracted(xyz_world, pc_range_roi=[-52, -52, 0, 52, 52, 6], ratio=0.8):
def contracted2world(xyz_contracted, pc_range_roi=[-80, -80, -3, 80, 80, 8], ratio=0.8):
def __init__(self, Z, Y, X, scene_centroid, bounds, position = 'embedding', length_pose_encoding = 3, opt = None, pad=None, assert_cube=False):
def Ref2Mem(self, xyz, Z, Y, X, assert_cube=False):
def Mem2Ref(self, xyz_mem, Z, Y, X, assert_cube=False):
def get_mem_T_ref(self, B, Z, Y, X, assert_cube=False, device='cuda'):
def get_ref_T_mem(self, B, Z, Y, X, assert_cube=False, device='cuda'):
def get_inbounds(self, xyz, Z, Y, X, already_mem=False, padding=0.0, assert_cube=False):
def voxelize_xyz(self, xyz_ref, Z, Y, X, already_mem=False, assert_cube=False, clean_eps=0):
def voxelize_xyz_and_feats(self, xyz_ref, feats, Z, Y, X, already_mem=False, assert_cube=False, clean_eps=0):
def get_occupancy(self, xyz, Z, Y, X, clean_eps=0, xyz_zero=None):
def get_feat_occupancy(self, xyz, feat, Z, Y, X, clean_eps=0, xyz_zero=None):
def unproject_image_to_mem(self, rgb_camB, pixB_T_camA, camB_T_camA, Z, Y, X, assert_cube=False):
def get_meta_data(self, cam_center, camB_T_camA = None, abs_position=False, assert_cube=False):
def get_voxel_position(self, cam_center, abs_position=True, assert_cube=False):
def apply_mem_T_ref_to_lrtlist(self, lrtlist_cam, Z, Y, X, assert_cube=False):
class Vox_util(nn.Module):
B, N, C = list(xyz.shape)
B, N, C = list(xyz_mem.shape)
B, N, D = list(xyz_ref.shape)
B, N, D = list(xyz_ref.shape)
B2, N2, D2 = list(feats.shape)
B, N, C = list(xyz.shape)
B, N, C = list(xyz.shape)
B2, N2, D2 = list(feat.shape)
B, C, H, W = list(rgb_camB.shape)
EPS=1e-6
Z, Y, X = self.Z, self.Y, self.X
Z, Y, X = self.Z, self.Y, self.X
B, N, C = list(lrtlist_cam.shape)
# Path: utils/basic.py
EPS = 1e-6
B_, S = shapelist[:2]
BS = shapelist[0]
S = int(BS/B)
def strnum(x):
def matmul2(mat1, mat2):
def pack_seqdim(tensor, B):
def unpack_seqdim(tensor, B):
def reduce_masked_mean(x, mask, dim=None, keepdim=False):
def meshgrid3d(B, Z, Y, X, stack=False, norm=False, device='cuda'):
def gridcloud3d(B, Z, Y, X, norm=False, device='cuda'):
def normalize_grid2d(grid_y, grid_x, Y, X, clamp_extreme=True):
# Path: utils/render.py
def get_rays(H, W, K, c2w, inverse_y, flip_x, flip_y, mode='center'):
def ndc_rays(H, W, focal, near, rays_o, rays_d):
def get_rays_of_a_view(H, W, K, c2w, ndc, inverse_y, flip_x, flip_y, mode='center'):
def cumprod_exclusive(p):
def get_ray_marching_ray(alpha):
def sample_ray(self, rays_o, rays_d, near, far, stepsize, xyz_min, xyz_max, voxel_size, is_train=False):
def __init__(self, init_val, beta_min=0.0001):
def forward(self, sdf, beta=None):
def get_beta(self):
def __init__(self, init_val, beta_min=0.0001):
def forward(self, sdf, beta=None):
def get_beta(self):
def __init__(self, init_val):
def forward(self, x):
def get_variance(self):
class SigmoidDensity(nn.Module): # alpha * Laplace(loc=0, scale=beta).cdf(-sdf)
class LaplaceDensity(nn.Module): # alpha * Laplace(loc=0, scale=beta).cdf(-sdf)
class SingleVarianceNetwork(nn.Module):
# Path: networks/_3DCNN.py
class S3DCNN(nn.Module):
def __init__(self, input_planes = 64, out_planes = 1, planes = 16, conv_3d_types1 = "3D", activate_fun = nn.ReLU(inplace=True), opt = None):
super(S3DCNN, self).__init__()
self.out_planes = out_planes
self.opt = opt
self.dres0 = nn.Sequential(convbn_3d(input_planes, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1),
activate_fun,
convbn_3d(planes*2, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1),
activate_fun)
self.dres1 = nn.Sequential(convbn_3d(planes*2, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1),
activate_fun,
convbn_3d(planes*2, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1))
self.dres2 = hourglass_PSMNet(planes*2, conv_3d_types1 = conv_3d_types1, activate_fun = activate_fun)
self.dres3 = hourglass_PSMNet(planes*2, conv_3d_types1 = conv_3d_types1, activate_fun = activate_fun)
self.dres4 = hourglass_PSMNet(planes*2, conv_3d_types1 = conv_3d_types1, activate_fun = activate_fun)
self.classif1 = nn.Sequential(convbn_3d(planes*2, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1),
activate_fun,
nn.Conv3d(planes*2, out_planes, kernel_size=3, padding=1, stride=1,bias=False))
self.classif2 = nn.Sequential(convbn_3d(planes*2, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1),
activate_fun,
nn.Conv3d(planes*2, out_planes, kernel_size=3, padding=1, stride=1,bias=False))
self.classif3 = nn.Sequential(convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),
activate_fun,
convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),
activate_fun,
convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),
activate_fun,
convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),
activate_fun,
convbn_3d(planes * 2, self.out_planes, 3, 1, 1, conv_3d_types=conv_3d_types1),)
if self.opt.use_semantic:
self.classif_semantic = nn.Sequential(convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),
activate_fun,
convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),
activate_fun,
convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),
activate_fun,
convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),
activate_fun,
convbn_3d(planes * 2, self.opt.semantic_classes, 3, 1, 1, conv_3d_types=conv_3d_types1),)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.Conv3d):
n = m.kernel_size[0] * m.kernel_size[1]*m.kernel_size[2] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
if self.opt.render_type == 'density':
pass
def geo_param(self):
return list(self.dres0.parameters()) + \
list(self.dres1.parameters()) + \
list(self.dres2.parameters()) + \
list(self.dres3.parameters()) + \
list(self.dres4.parameters()) + \
list(self.classif1.parameters()) + \
list(self.classif2.parameters()) + \
list(self.classif3.parameters())
def sem_head_param(self):
if self.opt.use_semantic:
return self.classif_semantic.parameters()
else:
return None
def forward(self, cost):
cost0 = self.dres0(cost)
cost0 = self.dres1(cost0) + cost0
out1, pre1, post1 = self.dres2(cost0, None, None)
out1 = out1+cost0
out2, pre2, post2 = self.dres3(out1, pre1, post1)
out2 = out2+cost0
out3, pre3, post3 = self.dres4(out2, pre1, post2)
if self.opt.use_semantic:
if self.opt.last_free:
out = self.classif_semantic(out3)
else:
semantic = self.classif_semantic(out3)
cost3 = self.classif3(out3)
out = torch.cat([semantic, cost3], dim=1)
return [out]
else:
cost3 = self.classif3(out3)
return [cost3]
# Path: networks/occupancy_decoder.py
import pdb
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch_efficient_distloss import eff_distloss, eff_distloss_native
from utils import geom
from utils import vox
from utils import basic
from utils import render
from ._3DCNN import S3DCNN
# This software is licensed under the terms of the Monodepth2 licence
# which allows for non-commercial use only, the full terms of which are made
# available in the LICENSE file.
from __future__ import absolute_import, division, print_function
class VolumeDecoder(nn.Module):
def __init__(self, opt):
super(VolumeDecoder, self).__init__()
self.opt = opt
self.use_semantic = self.opt.use_semantic
self.semantic_classes = self.opt.semantic_classes
self.batch = self.opt.batch_size // self.opt.cam_N
self.near = self.opt.min_depth
self.far = self.opt.max_depth
self.register_buffer('xyz_min', torch.from_numpy(
np.array([self.opt.real_size[0], self.opt.real_size[2], self.opt.real_size[4]])))
self.register_buffer('xyz_max', torch.from_numpy(
np.array([self.opt.real_size[1], self.opt.real_size[3], self.opt.real_size[5]])))
self.ZMAX = self.opt.real_size[1]
self.Z = self.opt.voxels_size[0]
self.Y = self.opt.voxels_size[1]
self.X = self.opt.voxels_size[2]
self.Z_final = self.Z
self.Y_final = self.Y
self.X_final = self.X
self.stepsize = self.opt.stepsize # voxel
self.num_voxels = self.Z_final * self.Y_final * self.X_final
self.stepsize_log = self.stepsize
self.interval = self.stepsize
if self.opt.contracted_coord:
# Sampling strategy for contracted coordinate
contracted_rate = self.opt.contracted_ratio
num_id_voxels = int(self.num_voxels * (contracted_rate)**3)
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_id_voxels).pow(1 / 3)
diagonal = (self.xyz_max - self.xyz_min).pow(2).sum().pow(1 / 2)
self.N_samples = int(diagonal / 2 / self.stepsize / self.voxel_size / contracted_rate)
if self.opt.infinite_range:
# depth_roi = [-self.far] * 3 + [self.far] * 3
zval_roi = [-diagonal] * 3 + [diagonal] * 3
fc = 1 - 0.5 / self.X # avoid NaN
zs_contracted = torch.linspace(0.0, fc, steps=self.N_samples)
zs_world = vox.contracted2world(
zs_contracted[None, :, None].repeat(1, 1, 3),
# pc_range_roi=depth_roi,
pc_range_roi=zval_roi,
ratio=self.opt.contracted_ratio)[:, :, 0]
else:
zs_world = torch.linspace(0.0, self.N_samples - 1, steps=self.N_samples)[None] * self.stepsize * self.voxel_size
self.register_buffer('Zval', zs_world)
pc_range_roi = self.xyz_min.tolist() + self.xyz_max.tolist()
self.norm_func = lambda xyz: vox.world2contracted(xyz, pc_range_roi=pc_range_roi, ratio=self.opt.contracted_ratio)
else:
self.N_samples = int(np.linalg.norm(np.array([self.Z_final // 2, self.Y_final // 2, self.X_final // 2]) + 1) / self.stepsize) + 1
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels).pow(1 / 3)
zs_world = torch.linspace(0.0, self.N_samples - 1, steps=self.N_samples)[None] * self.stepsize * self.voxel_size
self.register_buffer('Zval', zs_world)
self.norm_func = lambda xyz: (xyz - self.xyz_min.to(xyz)) / (self.xyz_max.to(xyz) - self.xyz_min.to(xyz)) * 2.0 - 1.0
length_pose_encoding = 3
if self.opt.position == 'embedding':
input_channel = self.opt.input_channel
self.pos_embedding = torch.nn.Parameter(torch.ones(
[1, input_channel, self.opt.voxels_size[1], self.opt.voxels_size[2], self.opt.voxels_size[0]]))
elif self.opt.position == 'embedding1':
input_channel = self.opt.input_channel
xyz_in_channels = 1 + 3
embedding_width = 192
embedding_depth = 5
self.embeddingnet = nn.Sequential(
nn.Linear(xyz_in_channels, embedding_width), nn.ReLU(inplace=True),
*[nn.Sequential(nn.Linear(embedding_width, embedding_width), nn.ReLU(inplace=True))
for _ in range(embedding_depth - 2)], nn.Linear(embedding_width, self.opt.input_channel),)
nn.init.constant_(self.embeddingnet[-1].bias, 0)
self.pos_embedding1 = None
self.pos_embedding_save = torch.nn.Parameter(torch.zeros([1, input_channel, self.opt.voxels_size[1], self.opt.voxels_size[2], self.opt.voxels_size[0]]), requires_grad= False)
else:
self.pos_embedding = None
self.pos_embedding1 = None
input_channel = self.opt.input_channel
scene_centroid_x = 0.0
scene_centroid_y = 0.0
scene_centroid_z = 0.0
scene_centroid = np.array([scene_centroid_x,
scene_centroid_y,
scene_centroid_z]).reshape([1, 3])
self.register_buffer('scene_centroid', torch.from_numpy(scene_centroid).float())
self.bounds = (self.opt.real_size[0], self.opt.real_size[1],
self.opt.real_size[2], self.opt.real_size[3],
self.opt.real_size[4], self.opt.real_size[5])
# bounds = (-40, 40, -40, 40, -1, 5.4)
self.vox_util = vox.Vox_util(
| self.Z, self.Y, self.X, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Kevin-thu/DiffMorpher
# Path: utils/model_utils.py
def get_img(img, resolution=512):
norm_mean = [0.5, 0.5, 0.5]
norm_std = [0.5, 0.5, 0.5]
transform = transforms.Compose([
transforms.Resize((resolution, resolution)),
transforms.ToTensor(),
transforms.Normalize(norm_mean, norm_std)
])
img = transform(img)
return img.unsqueeze(0)
# Path: utils/model_utils.py
@torch.no_grad()
def slerp(p0, p1, fract_mixing: float, adain=True):
r""" Copied from lunarring/latentblending
Helper function to correctly mix two random variables using spherical interpolation.
The function will always cast up to float64 for the sake of extra precision.
Args:
p0:
First tensor for interpolation
p1:
Second tensor for interpolation
fract_mixing: float
Mixing coefficient of interval [0, 1].
0 will return p0
1 will return p1
0.x will return a mix between both preserving angular velocity.
"""
if p0.dtype == torch.float16:
recast_to = 'fp16'
else:
recast_to = 'fp32'
p0 = p0.double()
p1 = p1.double()
if adain:
mean1, std1 = calc_mean_std(p0)
mean2, std2 = calc_mean_std(p1)
mean = mean1 * (1 - fract_mixing) + mean2 * fract_mixing
std = std1 * (1 - fract_mixing) + std2 * fract_mixing
norm = torch.linalg.norm(p0) * torch.linalg.norm(p1)
epsilon = 1e-7
dot = torch.sum(p0 * p1) / norm
dot = dot.clamp(-1+epsilon, 1-epsilon)
theta_0 = torch.arccos(dot)
sin_theta_0 = torch.sin(theta_0)
theta_t = theta_0 * fract_mixing
s0 = torch.sin(theta_0 - theta_t) / sin_theta_0
s1 = torch.sin(theta_t) / sin_theta_0
interp = p0*s0 + p1*s1
if adain:
interp = F.instance_norm(interp) * std + mean
if recast_to == 'fp16':
interp = interp.half()
elif recast_to == 'fp32':
interp = interp.float()
return interp
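# Editor's note: illustrative usage sketch added for clarity; not part of the repository.
# slerp mixes two tensors along the great circle between them (preserving angular velocity)
# instead of averaging linearly, which would shrink the norm of the interpolant. A minimal call,
# assuming the `slerp` defined above is in scope; the tensor shapes are arbitrary examples.
import torch

p0 = torch.randn(1, 4, 64, 64)  # e.g. a latent-shaped tensor
p1 = torch.randn(1, 4, 64, 64)
mid = slerp(p0, p1, 0.5)        # halfway between p0 and p1 (with AdaIN re-normalization)
near_p0 = slerp(p0, p1, 0.0)    # approximately recovers p0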
# Path: utils/model_utils.py
def do_replace_attn(key: str):
# return key.startswith('up_blocks.2') or key.startswith('up_blocks.3')
return key.startswith('up')
# Path: utils/lora_utils.py
def train_lora(image, prompt, save_lora_dir, model_path=None, tokenizer=None, text_encoder=None, vae=None, unet=None, noise_scheduler=None, lora_steps=200, lora_lr=2e-4, lora_rank=16, weight_name=None, safe_serialization=False, progress=tqdm):
# initialize accelerator
accelerator = Accelerator(
gradient_accumulation_steps=1,
# mixed_precision='fp16'
)
set_seed(0)
# Load the tokenizer
if tokenizer is None:
tokenizer = AutoTokenizer.from_pretrained(
model_path,
subfolder="tokenizer",
revision=None,
use_fast=False,
)
# initialize the model
if noise_scheduler is None:
noise_scheduler = DDPMScheduler.from_pretrained(model_path, subfolder="scheduler")
if text_encoder is None:
text_encoder_cls = import_model_class_from_model_name_or_path(model_path, revision=None)
text_encoder = text_encoder_cls.from_pretrained(
model_path, subfolder="text_encoder", revision=None
)
if vae is None:
vae = AutoencoderKL.from_pretrained(
model_path, subfolder="vae", revision=None
)
if unet is None:
unet = UNet2DConditionModel.from_pretrained(
model_path, subfolder="unet", revision=None
)
# set device and dtype
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
vae.requires_grad_(False)
text_encoder.requires_grad_(False)
unet.requires_grad_(False)
unet.to(device)
vae.to(device)
text_encoder.to(device)
# initialize UNet LoRA
unet_lora_attn_procs = {}
for name, attn_processor in unet.attn_processors.items():
cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
if name.startswith("mid_block"):
hidden_size = unet.config.block_out_channels[-1]
elif name.startswith("up_blocks"):
block_id = int(name[len("up_blocks.")])
hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
elif name.startswith("down_blocks"):
block_id = int(name[len("down_blocks.")])
hidden_size = unet.config.block_out_channels[block_id]
else:
raise NotImplementedError("name must start with up_blocks, mid_blocks, or down_blocks")
if isinstance(attn_processor, (AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0)):
lora_attn_processor_class = LoRAAttnAddedKVProcessor
else:
lora_attn_processor_class = (
LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
)
unet_lora_attn_procs[name] = lora_attn_processor_class(
hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=lora_rank
)
unet.set_attn_processor(unet_lora_attn_procs)
unet_lora_layers = AttnProcsLayers(unet.attn_processors)
# Optimizer creation
params_to_optimize = (unet_lora_layers.parameters())
optimizer = torch.optim.AdamW(
params_to_optimize,
lr=lora_lr,
betas=(0.9, 0.999),
weight_decay=1e-2,
eps=1e-08,
)
lr_scheduler = get_scheduler(
"constant",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=lora_steps,
num_cycles=1,
power=1.0,
)
# prepare accelerator
unet_lora_layers = accelerator.prepare_model(unet_lora_layers)
optimizer = accelerator.prepare_optimizer(optimizer)
lr_scheduler = accelerator.prepare_scheduler(lr_scheduler)
# initialize text embeddings
with torch.no_grad():
text_inputs = tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None)
text_embedding = encode_prompt(
text_encoder,
text_inputs.input_ids,
text_inputs.attention_mask,
text_encoder_use_attention_mask=False
)
if type(image) == np.ndarray:
image = Image.fromarray(image)
# initialize latent distribution
image_transforms = transforms.Compose(
[
transforms.Resize(512, interpolation=transforms.InterpolationMode.BILINEAR),
# transforms.RandomCrop(512),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
image = image_transforms(image).to(device)
image = image.unsqueeze(dim=0)
latents_dist = vae.encode(image).latent_dist
for _ in progress.tqdm(range(lora_steps), desc="Training LoRA..."):
unet.train()
model_input = latents_dist.sample() * vae.config.scaling_factor
# Sample noise that we'll add to the latents
noise = torch.randn_like(model_input)
bsz, channels, height, width = model_input.shape
# Sample a random timestep for each image
timesteps = torch.randint(
0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device
)
timesteps = timesteps.long()
# Add noise to the model input according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)
# Predict the noise residual
model_pred = unet(noisy_model_input, timesteps, text_embedding).sample
# Get the target for loss depending on the prediction type
if noise_scheduler.config.prediction_type == "epsilon":
target = noise
elif noise_scheduler.config.prediction_type == "v_prediction":
target = noise_scheduler.get_velocity(model_input, noise, timesteps)
else:
raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# save the trained lora
# unet = unet.to(torch.float32)
# vae = vae.to(torch.float32)
# text_encoder = text_encoder.to(torch.float32)
# unwrap_model is used to remove all special modules added when doing distributed training
# so here, there is no need to call unwrap_model
# unet_lora_layers = accelerator.unwrap_model(unet_lora_layers)
LoraLoaderMixin.save_lora_weights(
save_directory=save_lora_dir,
unet_lora_layers=unet_lora_layers,
text_encoder_lora_layers=None,
weight_name=weight_name,
safe_serialization=safe_serialization
)
# Path: utils/lora_utils.py
def load_lora(unet, lora_0, lora_1, alpha):
lora = {}
for key in lora_0:
lora[key] = (1 - alpha) * lora_0[key] + alpha * lora_1[key]
unet.load_attn_procs(lora)
return unet
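# Editor's note: illustrative sketch added by the editor; not part of the repository.
# load_lora blends the two LoRA state dicts element-wise: each tensor becomes the convex
# combination (1 - alpha) * lora_0[key] + alpha * lora_1[key]. A toy check with a single
# hypothetical key (the key name is made up purely for illustration):
import torch

lora_0 = {"to_q_lora.down.weight": torch.zeros(2, 2)}
lora_1 = {"to_q_lora.down.weight": torch.ones(2, 2)}
alpha = 0.25
blended = {k: (1 - alpha) * lora_0[k] + alpha * lora_1[k] for k in lora_0}
# blended["to_q_lora.down.weight"] is a 2x2 tensor filled with 0.25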
# Path: utils/alpha_scheduler.py
class AlphaScheduler:
def __init__(self):
...
def from_imgs(self, imgs):
self.__num_values = len(imgs)
self.__values = [0]
for i in range(self.__num_values - 1):
dis = distance(imgs[i], imgs[i + 1])
self.__values.append(dis)
self.__values[i + 1] += self.__values[i]
for i in range(self.__num_values):
self.__values[i] /= self.__values[-1]
def save(self, filename):
torch.save(torch.tensor(self.__values), filename)
def load(self, filename):
self.__values = torch.load(filename).tolist()
self.__num_values = len(self.__values)
def get_x(self, y):
assert y >= 0 and y <= 1
id = bisect.bisect_left(self.__values, y)
id -= 1
if id < 0:
id = 0
yl = self.__values[id]
yr = self.__values[id + 1]
xl = id * (1 / (self.__num_values - 1))
xr = (id + 1) * (1 / (self.__num_values - 1))
x = (y - yl) / (yr - yl) * (xr - xl) + xl
return x
def get_list(self, len=None):
if len is None:
len = self.__num_values
ys = torch.linspace(0, 1, len)
res = [self.get_x(y) for y in ys]
return res
# Path: model.py
import os
import torch
import torch.nn.functional as F
import tqdm
import numpy as np
import safetensors
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.attention_processor import AttnProcessor
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from PIL import Image
from torchvision import transforms
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import StableDiffusionPipeline
from argparse import ArgumentParser
from utils.model_utils import get_img, slerp, do_replace_attn
from utils.lora_utils import train_lora, load_lora
from utils.alpha_scheduler import AlphaScheduler
class StoreProcessor():
def __init__(self, original_processor, value_dict, name):
self.original_processor = original_processor
self.value_dict = value_dict
self.name = name
self.value_dict[self.name] = dict()
self.id = 0
def __call__(self, attn, hidden_states, *args, encoder_hidden_states=None, attention_mask=None, **kwargs):
# Is self attention
if encoder_hidden_states is None:
self.value_dict[self.name][self.id] = hidden_states.detach()
self.id += 1
res = self.original_processor(attn, hidden_states, *args,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
**kwargs)
return res
class LoadProcessor():
def __init__(self, original_processor, name, img0_dict, img1_dict, alpha, beta=0, lamd=0.6):
super().__init__()
self.original_processor = original_processor
self.name = name
self.img0_dict = img0_dict
self.img1_dict = img1_dict
self.alpha = alpha
self.beta = beta
self.lamd = lamd
self.id = 0
def parent_call(
self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, scale=1.0
):
residual = hidden_states
if attn.spatial_norm is not None:
hidden_states = attn.spatial_norm(hidden_states)
input_ndim = hidden_states.ndim
if input_ndim == 4:
batch_size, channel, height, width = hidden_states.shape
hidden_states = hidden_states.view(
batch_size, channel, height * width).transpose(1, 2)
batch_size, sequence_length, _ = (
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
)
attention_mask = attn.prepare_attention_mask(
attention_mask, sequence_length, batch_size)
if attn.group_norm is not None:
hidden_states = attn.group_norm(
hidden_states.transpose(1, 2)).transpose(1, 2)
query = attn.to_q(hidden_states) + scale * \
self.original_processor.to_q_lora(hidden_states)
query = attn.head_to_batch_dim(query)
if encoder_hidden_states is None:
encoder_hidden_states = hidden_states
elif attn.norm_cross:
encoder_hidden_states = attn.norm_encoder_hidden_states(
encoder_hidden_states)
key = attn.to_k(encoder_hidden_states) + scale * \
self.original_processor.to_k_lora(encoder_hidden_states)
value = attn.to_v(encoder_hidden_states) + scale * \
self.original_processor.to_v_lora(encoder_hidden_states)
key = attn.head_to_batch_dim(key)
value = attn.head_to_batch_dim(value)
attention_probs = attn.get_attention_scores(
query, key, attention_mask)
hidden_states = torch.bmm(attention_probs, value)
hidden_states = attn.batch_to_head_dim(hidden_states)
# linear proj
hidden_states = attn.to_out[0](
hidden_states) + scale * self.original_processor.to_out_lora(hidden_states)
# dropout
hidden_states = attn.to_out[1](hidden_states)
if input_ndim == 4:
hidden_states = hidden_states.transpose(
-1, -2).reshape(batch_size, channel, height, width)
| if attn.residual_connection: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: modelscope/richdreamer
# Path: threestudio/models/prompt_processors/base.py
class PromptProcessor(BaseObject):
@dataclass
class Config(BaseObject.Config):
prompt: str = "a hamburger"
# manually assigned view-dependent prompts
prompt_front: Optional[str] = None
prompt_side: Optional[str] = None
prompt_back: Optional[str] = None
prompt_overhead: Optional[str] = None
negative_prompt: str = ""
pretrained_model_name_or_path: str = "runwayml/stable-diffusion-v1-5"
overhead_threshold: float = 60.0
front_threshold: float = 45.0
back_threshold: float = 45.0
view_dependent_prompt_front: bool = False
use_cache: bool = True
spawn: bool = True
# perp neg
use_perp_neg: bool = False
# a*e(-b*r) + c
# a * e(-b) + c = 0
perp_neg_f_sb: Tuple[float, float, float] = (1, 0.5, -0.606)
perp_neg_f_fsb: Tuple[float, float, float] = (1, 0.5, +0.967)
perp_neg_f_fs: Tuple[float, float, float] = (
4,
0.5,
-2.426,
) # f_fs(1) = 0, a, b > 0
perp_neg_f_sf: Tuple[float, float, float] = (4, 0.5, -2.426)
# prompt debiasing
use_prompt_debiasing: bool = False
pretrained_model_name_or_path_prompt_debiasing: str = "bert-base-uncased"
# index of words that can potentially be removed
prompt_debiasing_mask_ids: Optional[List[int]] = None
cfg: Config
@rank_zero_only
def configure_text_encoder(self) -> None:
raise NotImplementedError
@rank_zero_only
def destroy_text_encoder(self) -> None:
raise NotImplementedError
def configure(self) -> None:
self._cache_dir = ".threestudio_cache/text_embeddings" # FIXME: hard-coded path
# view-dependent text embeddings
self.directions: List[DirectionConfig]
if self.cfg.view_dependent_prompt_front:
self.directions = [
DirectionConfig(
"side",
lambda s: f"side view of {s}",
lambda s: s,
lambda ele, azi, dis: torch.ones_like(ele, dtype=torch.bool),
),
DirectionConfig(
"front",
lambda s: f"front view of {s}",
lambda s: s,
lambda ele, azi, dis: (
shift_azimuth_deg(azi) > -self.cfg.front_threshold
)
& (shift_azimuth_deg(azi) < self.cfg.front_threshold),
),
DirectionConfig(
"back",
lambda s: f"backside view of {s}",
lambda s: s,
lambda ele, azi, dis: (
shift_azimuth_deg(azi) > 180 - self.cfg.back_threshold
)
| (shift_azimuth_deg(azi) < -180 + self.cfg.back_threshold),
),
DirectionConfig(
"overhead",
lambda s: f"overhead view of {s}",
lambda s: s,
lambda ele, azi, dis: ele > self.cfg.overhead_threshold,
),
]
else:
self.directions = [
DirectionConfig(
"side",
lambda s: f"{s}, side view",
lambda s: s,
lambda ele, azi, dis: torch.ones_like(ele, dtype=torch.bool),
),
DirectionConfig(
"front",
lambda s: f"{s}, front view",
lambda s: s,
lambda ele, azi, dis: (
shift_azimuth_deg(azi) > -self.cfg.front_threshold
)
& (shift_azimuth_deg(azi) < self.cfg.front_threshold),
),
DirectionConfig(
"back",
lambda s: f"{s}, back view",
lambda s: s,
lambda ele, azi, dis: (
shift_azimuth_deg(azi) > 180 - self.cfg.back_threshold
)
| (shift_azimuth_deg(azi) < -180 + self.cfg.back_threshold),
),
DirectionConfig(
"overhead",
lambda s: f"{s}, overhead view",
lambda s: s,
lambda ele, azi, dis: ele > self.cfg.overhead_threshold,
),
]
self.direction2idx = {d.name: i for i, d in enumerate(self.directions)}
with open(os.path.join("load/prompt_library.json"), "r") as f:
self.prompt_library = json.load(f)
# use provided prompt or find prompt in library
self.prompt = self.preprocess_prompt(self.cfg.prompt)
# use provided negative prompt
self.negative_prompt = self.cfg.negative_prompt
if hasattr(self.cfg, "prefix"):
self.prompt = self.cfg.prefix + self.prompt
threestudio.info(
f"Using prompt [{self.prompt}] and negative prompt [{self.negative_prompt}]"
)
# view-dependent prompting
if self.cfg.use_prompt_debiasing:
assert (
self.cfg.prompt_side is None
and self.cfg.prompt_back is None
and self.cfg.prompt_overhead is None
), "Do not manually assign prompt_side, prompt_back or prompt_overhead when using prompt debiasing"
prompts = self.get_debiased_prompt(self.prompt)
self.prompts_vd = [
d.prompt(prompt) for d, prompt in zip(self.directions, prompts)
]
else:
self.prompts_vd = [
self.cfg.get(f"prompt_{d.name}", None) or d.prompt(self.prompt) # type: ignore
for d in self.directions
]
prompts_vd_display = " ".join(
[
f"[{d.name}]:[{prompt}]"
for prompt, d in zip(self.prompts_vd, self.directions)
]
)
threestudio.info(f"Using view-dependent prompts {prompts_vd_display}")
self.negative_prompts_vd = [
d.negative_prompt(self.negative_prompt) for d in self.directions
]
self.prepare_text_embeddings()
self.load_text_embeddings()
@staticmethod
def spawn_func(pretrained_model_name_or_path, prompts, cache_dir, device):
raise NotImplementedError
@rank_zero_only
def prepare_text_embeddings(self):
os.makedirs(self._cache_dir, exist_ok=True)
all_prompts = (
[self.prompt]
+ [self.negative_prompt]
+ self.prompts_vd
+ self.negative_prompts_vd
)
prompts_to_process = []
for prompt in all_prompts:
if self.cfg.use_cache:
# some text embeddings are already in cache
# do not process them
cache_path = os.path.join(
self._cache_dir,
f"{hash_prompt(self.cfg.pretrained_model_name_or_path, prompt)}.pt",
)
if os.path.exists(cache_path):
threestudio.debug(
f"Text embeddings for model {self.cfg.pretrained_model_name_or_path} and prompt [{prompt}] are already in cache, skip processing."
)
continue
prompts_to_process.append(prompt)
if len(prompts_to_process) > 0:
if self.cfg.spawn:
ctx = mp.get_context("spawn")
subprocess = ctx.Process(
target=self.spawn_func,
args=(
self.cfg.pretrained_model_name_or_path,
prompts_to_process,
self._cache_dir,
self.device,
),
)
subprocess.start()
subprocess.join()
else:
self.spawn_func(
self.cfg.pretrained_model_name_or_path,
prompts_to_process,
self._cache_dir,
self.device,
)
cleanup()
def load_text_embeddings(self):
# synchronize, to ensure the text embeddings have been computed and saved to cache
barrier()
self.text_embeddings = self.load_from_cache(self.prompt)[None, ...]
self.uncond_text_embeddings = self.load_from_cache(self.negative_prompt)[
None, ...
]
self.text_embeddings_vd = torch.stack(
[self.load_from_cache(prompt) for prompt in self.prompts_vd], dim=0
)
self.uncond_text_embeddings_vd = torch.stack(
[self.load_from_cache(prompt) for prompt in self.negative_prompts_vd], dim=0
)
threestudio.debug(f"Loaded text embeddings.")
def load_from_cache(self, prompt):
cache_path = os.path.join(
self._cache_dir,
f"{hash_prompt(self.cfg.pretrained_model_name_or_path, prompt)}.pt",
)
if not os.path.exists(cache_path):
raise FileNotFoundError(
f"Text embedding file {cache_path} for model {self.cfg.pretrained_model_name_or_path} and prompt [{prompt}] not found."
)
return torch.load(cache_path, map_location=self.device)
def preprocess_prompt(self, prompt: str) -> str:
if prompt.startswith("lib:"):
# find matches in the library
candidate = None
keywords = prompt[4:].lower().split("_")
for prompt in self.prompt_library["dreamfusion"]:
if all([k in prompt.lower() for k in keywords]):
if candidate is not None:
raise ValueError(
f"Multiple prompts matched with keywords {keywords} in library"
)
candidate = prompt
if candidate is None:
raise ValueError(
f"Cannot find prompt with keywords {keywords} in library"
)
threestudio.info("Find matched prompt in library: " + candidate)
return candidate
else:
return prompt
def get_text_embeddings(
self, prompt: Union[str, List[str]], negative_prompt: Union[str, List[str]]
) -> Tuple[Float[Tensor, "B ..."], Float[Tensor, "B ..."]]:
raise NotImplementedError
def get_debiased_prompt(self, prompt: str) -> List[str]:
os.environ["TOKENIZERS_PARALLELISM"] = "false"
tokenizer = AutoTokenizer.from_pretrained(
self.cfg.pretrained_model_name_or_path_prompt_debiasing
)
model = BertForMaskedLM.from_pretrained(
self.cfg.pretrained_model_name_or_path_prompt_debiasing
)
views = [d.name for d in self.directions]
view_ids = tokenizer(" ".join(views), return_tensors="pt").input_ids[0]
view_ids = view_ids[1:5]
def modulate(prompt):
prompt_vd = f"This image is depicting a [MASK] view of {prompt}"
tokens = tokenizer(
prompt_vd,
padding="max_length",
truncation=True,
add_special_tokens=True,
return_tensors="pt",
)
mask_idx = torch.where(tokens.input_ids == tokenizer.mask_token_id)[1]
logits = model(**tokens).logits
logits = F.softmax(logits[0, mask_idx], dim=-1)
logits = logits[0, view_ids]
probes = logits / logits.sum()
return probes
prompts = [prompt.split(" ") for _ in range(4)]
full_probe = modulate(prompt)
n_words = len(prompt.split(" "))
prompt_debiasing_mask_ids = (
self.cfg.prompt_debiasing_mask_ids
if self.cfg.prompt_debiasing_mask_ids is not None
else list(range(n_words))
)
words_to_debias = [prompt.split(" ")[idx] for idx in prompt_debiasing_mask_ids]
threestudio.info(f"Words that can potentially be removed: {words_to_debias}")
for idx in prompt_debiasing_mask_ids:
words = prompt.split(" ")
prompt_ = " ".join(words[:idx] + words[(idx + 1) :])
part_probe = modulate(prompt_)
pmi = full_probe / torch.lerp(part_probe, full_probe, 0.5)
for i in range(pmi.shape[0]):
if pmi[i].item() < 0.95:
prompts[i][idx] = ""
debiased_prompts = [" ".join([word for word in p if word]) for p in prompts]
for d, debiased_prompt in zip(views, debiased_prompts):
threestudio.info(f"Debiased prompt of the {d} view is [{debiased_prompt}]")
del tokenizer, model
cleanup()
return debiased_prompts
def __call__(self) -> PromptProcessorOutput:
return PromptProcessorOutput(
text_embeddings=self.text_embeddings,
uncond_text_embeddings=self.uncond_text_embeddings,
text_embeddings_vd=self.text_embeddings_vd,
uncond_text_embeddings_vd=self.uncond_text_embeddings_vd,
directions=self.directions,
direction2idx=self.direction2idx,
use_perp_neg=self.cfg.use_perp_neg,
perp_neg_f_sb=self.cfg.perp_neg_f_sb,
perp_neg_f_fsb=self.cfg.perp_neg_f_fsb,
perp_neg_f_fs=self.cfg.perp_neg_f_fs,
perp_neg_f_sf=self.cfg.perp_neg_f_sf,
)
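# Editor's note: illustrative sketch of the prompt-debiasing rule above; not from the repository.
# A word is dropped for a given view when removing it barely changes that view's probability:
# pmi = full_probe / lerp(part_probe, full_probe, 0.5) falls below 0.95. The numbers below are toy values.
import torch

full_probe = torch.tensor([0.40, 0.30, 0.20, 0.10])   # view probabilities with the word kept
part_probe = torch.tensor([0.55, 0.25, 0.12, 0.08])   # view probabilities with the word removed
pmi = full_probe / torch.lerp(part_probe, full_probe, 0.5)
# pmi < 0.95 marks the views (here only the first one) for which this word would be blanked out.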
# Path: threestudio/models/prompt_processors/base.py
def hash_prompt(model: str, prompt: str) -> str:
import hashlib
identifier = f"{model}-{prompt}"
return hashlib.md5(identifier.encode()).hexdigest()
# Path: threestudio/utils/misc.py
def cleanup():
gc.collect()
torch.cuda.empty_cache()
tcnn.free_temporary_memory()
# Path: threestudio/models/prompt_processors/wanx_prompt_processor.py
import json
import os
import torch
import torch.nn as nn
import threestudio
from dataclasses import dataclass
from transformers import AutoTokenizer, CLIPTextModel
from extern.wanx.atom import data, models, ops
from extern.wanx.config import cfg
from threestudio.models.prompt_processors.base import (PromptProcessor,
hash_prompt,)
from threestudio.utils.misc import cleanup
from threestudio.utils.typing import *
def encode_text(m, tokens):
b, s = tokens.shape
mask = tokens.ne(m.pad_token).long()
# embeddings
x = (
m.token_embedding(tokens)
+ m.type_embedding(torch.zeros_like(tokens))
+ m.pos_embedding(m.pad_token + torch.cumsum(mask, dim=1) * mask)
)
x = m.norm(x)
x = m.dropout(x)
# blocks
for block in m.blocks[:-1]:
x = block(x, mask.view(b, 1, 1, s))
words = x
sentence = m.blocks[-1](x, mask.view(b, 1, 1, s))
mask = tokens.ne(m.pad_token).unsqueeze(-1).to(sentence)
sentence = (sentence * mask).sum(dim=1) / mask.sum(dim=1)
sentence = m.head(sentence)
# return {
# 'context': words,
# 'y': sentence
# }
res = torch.cat([words, sentence.unsqueeze(1)], dim=1) # [1, 78, 1024]
return res
def wanx_model_init(device, model_path):
# [model] clip
tokenizer = data.HuggingfaceTokenizer(
path=f"{model_path}/clip_tokenizer", length=cfg.text_len, clean=True
)
clip = (
getattr(models, cfg.clip_model)(
vocab_size=len(tokenizer.tokenizer), pretrained=False
)
.eval()
.requires_grad_(False)
.textual.to(device)
)
clip.load_state_dict(torch.load(f"{model_path}/clip.pth", map_location="cpu"))
return clip, tokenizer
@threestudio.register("wanx-prompt-processor")
class WanXPromptProcessor(PromptProcessor):
@dataclass
class Config(PromptProcessor.Config):
prefix: str = "<wanx> "
cfg: Config
### these functions are unused, kept for debugging ###
def configure_text_encoder(self) -> None:
os.environ["TOKENIZERS_PARALLELISM"] = "false" # TODO check
self.text_encoder, self.tokenizer = wanx_model_init(
device=self.device, model_path=self.cfg.pretrained_model_name_or_path
)
for p in self.text_encoder.parameters():
p.requires_grad_(False)
def destroy_text_encoder(self) -> None:
del self.tokenizer
del self.text_encoder
cleanup()
def get_text_embeddings(
self, prompt: Union[str, List[str]], negative_prompt: Union[str, List[str]]
) -> Tuple[Float[Tensor, "B 77 768"], Float[Tensor, "B 77 768"]]:
if isinstance(prompt, str):
prompt = [prompt]
if isinstance(negative_prompt, str):
negative_prompt = [negative_prompt]
# Tokenize text and get embeddings
tokens = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
)
uncond_tokens = self.tokenizer(
negative_prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
)
with torch.no_grad():
text_embeddings = self.text_encoder(tokens.input_ids.to(self.device))[0]
uncond_text_embeddings = self.text_encoder(
uncond_tokens.input_ids.to(self.device)
)[0]
| return text_embeddings, uncond_text_embeddings |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: rehg-lab/RAVE
# Path: annotator/oneformer/detectron2/config/config.py
def configurable(init_func=None, *, from_config=None):
"""
Decorate a function or a class's __init__ method so that it can be called
with a :class:`CfgNode` object using a :func:`from_config` function that translates
:class:`CfgNode` to arguments.
Examples:
::
# Usage 1: Decorator on __init__:
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg): # 'cfg' must be the first argument
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
# Usage 2: Decorator on any function. Needs an extra from_config argument:
@configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
def a_func(a, b=2, c=3):
pass
a1 = a_func(a=1, b=2) # regular call
a2 = a_func(cfg) # call with a cfg
a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
Args:
init_func (callable): a class's ``__init__`` method in usage 1. The
class must have a ``from_config`` classmethod which takes `cfg` as
the first argument.
from_config (callable): the from_config function in usage 2. It must take `cfg`
as its first argument.
"""
if init_func is not None:
assert (
inspect.isfunction(init_func)
and from_config is None
and init_func.__name__ == "__init__"
), "Incorrect use of @configurable. Check API documentation for examples."
@functools.wraps(init_func)
def wrapped(self, *args, **kwargs):
try:
from_config_func = type(self).from_config
except AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs)
return wrapped
else:
if from_config is None:
return configurable # @configurable() is made equivalent to @configurable
assert inspect.isfunction(
from_config
), "from_config argument of configurable must be a function!"
def wrapper(orig_func):
@functools.wraps(orig_func)
def wrapped(*args, **kwargs):
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config, *args, **kwargs)
return orig_func(**explicit_args)
else:
return orig_func(*args, **kwargs)
wrapped.from_config = from_config
return wrapped
return wrapper
# Path: annotator/oneformer/detectron2/layers/wrappers.py
def shapes_to_tensor(x: List[int], device: Optional[torch.device] = None) -> torch.Tensor:
def check_if_dynamo_compiling():
def cat(tensors: List[torch.Tensor], dim: int = 0):
def empty_input_loss_func_wrapper(loss_func):
def wrapped_loss_func(input, target, *, reduction="mean", **kwargs):
def forward(ctx, x, new_shape):
def backward(ctx, grad):
def __init__(self, *args, **kwargs):
def forward(self, x):
def nonzero_tuple(x):
def move_device_like(src: torch.Tensor, dst: torch.Tensor) -> torch.Tensor:
class _NewEmptyTensorOp(torch.autograd.Function):
class Conv2d(torch.nn.Conv2d):
# Path: annotator/oneformer/detectron2/structures/instances.py
class Instances:
"""
This class represents a list of instances in an image.
It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields".
All fields must have the same ``__len__`` which is the number of instances.
All other (non-field) attributes of this class are considered private:
they must start with '_' and are not modifiable by a user.
Some basic usage:
1. Set/get/check a field:
.. code-block:: python
instances.gt_boxes = Boxes(...)
print(instances.pred_masks) # a tensor of shape (N, H, W)
print('gt_masks' in instances)
2. ``len(instances)`` returns the number of instances
3. Indexing: ``instances[indices]`` will apply the indexing on all the fields
and returns a new :class:`Instances`.
Typically, ``indices`` is an integer vector of indices,
or a binary mask of length ``num_instances``
.. code-block:: python
category_3_detections = instances[instances.pred_classes == 3]
confident_detections = instances[instances.scores > 0.9]
"""
def __init__(self, image_size: Tuple[int, int], **kwargs: Any):
"""
Args:
image_size (height, width): the spatial size of the image.
kwargs: fields to add to this `Instances`.
"""
self._image_size = image_size
self._fields: Dict[str, Any] = {}
for k, v in kwargs.items():
self.set(k, v)
@property
def image_size(self) -> Tuple[int, int]:
"""
Returns:
tuple: height, width
"""
return self._image_size
def __setattr__(self, name: str, val: Any) -> None:
if name.startswith("_"):
super().__setattr__(name, val)
else:
self.set(name, val)
def __getattr__(self, name: str) -> Any:
if name == "_fields" or name not in self._fields:
raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
return self._fields[name]
def set(self, name: str, value: Any) -> None:
"""
Set the field named `name` to `value`.
The length of `value` must be the number of instances,
and must agree with other existing fields in this object.
"""
with warnings.catch_warnings(record=True):
data_len = len(value)
if len(self._fields):
assert (
len(self) == data_len
), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self))
self._fields[name] = value
def has(self, name: str) -> bool:
"""
Returns:
bool: whether the field called `name` exists.
"""
return name in self._fields
def remove(self, name: str) -> None:
"""
Remove the field called `name`.
"""
del self._fields[name]
def get(self, name: str) -> Any:
"""
Returns the field called `name`.
"""
return self._fields[name]
def get_fields(self) -> Dict[str, Any]:
"""
Returns:
dict: a dict which maps names (str) to data of the fields
Modifying the returned dict will modify this instance.
"""
return self._fields
# Tensor-like methods
def to(self, *args: Any, **kwargs: Any) -> "Instances":
"""
Returns:
Instances: all fields are called with a `to(device)`, if the field has this method.
"""
ret = Instances(self._image_size)
for k, v in self._fields.items():
if hasattr(v, "to"):
v = v.to(*args, **kwargs)
ret.set(k, v)
return ret
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances":
"""
Args:
item: an index-like object and will be used to index all the fields.
Returns:
If `item` is a string, return the data in the corresponding field.
Otherwise, returns an `Instances` where all fields are indexed by `item`.
"""
if type(item) == int:
if item >= len(self) or item < -len(self):
raise IndexError("Instances index out of range!")
else:
item = slice(item, None, len(self))
ret = Instances(self._image_size)
for k, v in self._fields.items():
ret.set(k, v[item])
return ret
def __len__(self) -> int:
for v in self._fields.values():
# use __len__ because len() has to be int and is not friendly to tracing
return v.__len__()
raise NotImplementedError("Empty Instances does not support __len__!")
def __iter__(self):
raise NotImplementedError("`Instances` object is not iterable!")
@staticmethod
def cat(instance_lists: List["Instances"]) -> "Instances":
"""
Args:
instance_lists (list[Instances])
Returns:
Instances
"""
assert all(isinstance(i, Instances) for i in instance_lists)
assert len(instance_lists) > 0
if len(instance_lists) == 1:
return instance_lists[0]
image_size = instance_lists[0].image_size
if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing
for i in instance_lists[1:]:
assert i.image_size == image_size
ret = Instances(image_size)
for k in instance_lists[0]._fields.keys():
values = [i.get(k) for i in instance_lists]
v0 = values[0]
if isinstance(v0, torch.Tensor):
values = torch.cat(values, dim=0)
elif isinstance(v0, list):
values = list(itertools.chain(*values))
elif hasattr(type(v0), "cat"):
values = type(v0).cat(values)
else:
raise ValueError("Unsupported type {} for concatenation".format(type(v0)))
ret.set(k, values)
return ret
def __str__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={}, ".format(len(self))
s += "image_height={}, ".format(self._image_size[0])
s += "image_width={}, ".format(self._image_size[1])
s += "fields=[{}])".format(", ".join((f"{k}: {v}" for k, v in self._fields.items())))
return s
__repr__ = __str__
# Path: annotator/oneformer/detectron2/structures/keypoints.py
@torch.jit.script_if_tracing
def heatmaps_to_keypoints(maps: torch.Tensor, rois: torch.Tensor) -> torch.Tensor:
"""
Extract predicted keypoint locations from heatmaps.
Args:
maps (Tensor): (#ROIs, #keypoints, POOL_H, POOL_W). The predicted heatmap of logits for
each ROI and each keypoint.
rois (Tensor): (#ROIs, 4). The box of each ROI.
Returns:
Tensor of shape (#ROIs, #keypoints, 4) with the last dimension corresponding to
(x, y, logit, score) for each keypoint.
When converting discrete pixel indices in an NxN image to a continuous keypoint coordinate,
we maintain consistency with :meth:`Keypoints.to_heatmap` by using the conversion from
Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate.
"""
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = (rois[:, 2] - rois[:, 0]).clamp(min=1)
heights = (rois[:, 3] - rois[:, 1]).clamp(min=1)
widths_ceil = widths.ceil()
heights_ceil = heights.ceil()
num_rois, num_keypoints = maps.shape[:2]
xy_preds = maps.new_zeros(rois.shape[0], num_keypoints, 4)
width_corrections = widths / widths_ceil
height_corrections = heights / heights_ceil
keypoints_idx = torch.arange(num_keypoints, device=maps.device)
for i in range(num_rois):
outsize = (int(heights_ceil[i]), int(widths_ceil[i]))
roi_map = F.interpolate(maps[[i]], size=outsize, mode="bicubic", align_corners=False)
# Although semantically equivalent, `reshape` is used instead of `squeeze` due
# to limitation during ONNX export of `squeeze` in scripting mode
roi_map = roi_map.reshape(roi_map.shape[1:]) # keypoints x H x W
# softmax over the spatial region
max_score, _ = roi_map.view(num_keypoints, -1).max(1)
max_score = max_score.view(num_keypoints, 1, 1)
tmp_full_resolution = (roi_map - max_score).exp_()
tmp_pool_resolution = (maps[i] - max_score).exp_()
# Produce scores over the region H x W, but normalize with POOL_H x POOL_W,
# so that the scores of objects of different absolute sizes will be more comparable
roi_map_scores = tmp_full_resolution / tmp_pool_resolution.sum((1, 2), keepdim=True)
w = roi_map.shape[2]
pos = roi_map.view(num_keypoints, -1).argmax(1)
x_int = pos % w
y_int = (pos - x_int) // w
assert (
roi_map_scores[keypoints_idx, y_int, x_int]
== roi_map_scores.view(num_keypoints, -1).max(1)[0]
).all()
x = (x_int.float() + 0.5) * width_corrections[i]
y = (y_int.float() + 0.5) * height_corrections[i]
xy_preds[i, :, 0] = x + offset_x[i]
xy_preds[i, :, 1] = y + offset_y[i]
xy_preds[i, :, 2] = roi_map[keypoints_idx, y_int, x_int]
xy_preds[i, :, 3] = roi_map_scores[keypoints_idx, y_int, x_int]
return xy_preds
# Path: annotator/oneformer/detectron2/utils/events.py
def get_event_storage():
"""
Returns:
The :class:`EventStorage` object that's currently being used.
Throws an error if no :class:`EventStorage` is currently enabled.
"""
assert len(
_CURRENT_STORAGE_STACK
), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!"
return _CURRENT_STORAGE_STACK[-1]
# Path: annotator/oneformer/detectron2/utils/registry.py
def _convert_target_to_string(t: Any) -> str:
def locate(name: str) -> Any:
# Path: annotator/oneformer/detectron2/modeling/roi_heads/keypoint_head.py
from typing import List
from torch import nn
from torch.nn import functional as F
from annotator.oneformer.detectron2.config import configurable
from annotator.oneformer.detectron2.layers import Conv2d, ConvTranspose2d, cat, interpolate
from annotator.oneformer.detectron2.structures import Instances, heatmaps_to_keypoints
from annotator.oneformer.detectron2.utils.events import get_event_storage
from annotator.oneformer.detectron2.utils.registry import Registry
import torch
# Copyright (c) Facebook, Inc. and its affiliates.
_TOTAL_SKIPPED = 0
__all__ = [
"ROI_KEYPOINT_HEAD_REGISTRY",
"build_keypoint_head",
"BaseKeypointRCNNHead",
| "KRCNNConvDeconvUpsampleHead",
|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: worldcoin/open-iris
# Path: tests/unit_tests/utils.py
def generate_arc(
radius: float, center_x: float, center_y: float, from_angle: float, to_angle: float, num_points: int = 1000
) -> np.ndarray:
angles = np.linspace(from_angle, to_angle, num_points, endpoint=not (from_angle == 0.0 and to_angle == 2 * np.pi))
circle_xs = radius * np.cos(angles) + center_x
circle_ys = radius * np.sin(angles) + center_y
return np.column_stack([circle_xs, circle_ys])
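# Editor's note: illustrative usage sketch added for clarity; not from the repository.
# generate_arc samples num_points (x, y) points along a circular arc; only for a full circle
# (from_angle == 0 and to_angle == 2*pi) is the endpoint dropped, so start and end points are
# not duplicated. A small example, assuming the generate_arc defined above is in scope:
import numpy as np

quarter = generate_arc(radius=1.0, center_x=0.0, center_y=0.0,
                       from_angle=0.0, to_angle=np.pi / 2, num_points=3)
# quarter has shape (3, 2) and is approximately [[1, 0], [0.707, 0.707], [0, 1]]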
# Path: tests/unit_tests/utils.py
def generate_multiple_arcs(arc_params: List[Dict[str, Any]]) -> np.ndarray:
return np.concatenate([generate_arc(**kwargs) for kwargs in arc_params])
# Path: tests/unit_tests/utils.py
def rotated_elliptical_contour(
theta: float, a: float = 5, b: float = 1, resolution: int = 200, centered=False
) -> np.ndarray:
r"""Compute the pixelised contour of a rotated ellipses.
This function creates a binary image where :math:`pixel = 1 \Leftrightarrow pixel \in` ellipse
Ellipse equation
:math:`(\frac{x}{a})^2 + (\frac{y}{b})^2 < 1`
Rotate by :math:`\theta`
:math:`(\frac{x cos(\theta) + y sin(\theta)}{a})^2 + (\frac{x sin(\theta) - y cos(\theta)}{b})^2 < 1`
Isolate x and y
:math:`((a sin(\theta))^2 + (b cos(\theta))^2)x^2 + 2 cos(\theta)sin(\theta)(b^2 - a^2)xy
+ ((a cos(\theta))^2 + (b sin(\theta))^2)y^2 < a^2b^2 \blacksquare`
Source: :math:`math`
Or https://www.maa.org/external_archive/joma/Volume8/Kalman/General.html because if it's on internet it's true.
Also, `resolution` determines the precision of the contour by being the side of the square binary image used to
generate the contour, but also the diameter of the final ellipse
Args:
theta (float): angle between the x axis and the major axis of the ellipse
a (float): The semi-major axis of the ellipse. Must be below 10, or the ellipse could crop out of the image.
b (float): The semi-minor axis of the ellipse. Must be below 10, or the ellipse could crop out of the image.
resolution (int): side of the square binary image used to generate contour
Returns:
np.ndarray: produced contour of shape (_, 1, 2)
"""
x, y = np.meshgrid(np.linspace(-10, 10, resolution), np.linspace(-10, 10, resolution))
x, y = x.flatten(), y.flatten()
binary_map = (
((a * np.sin(theta)) ** 2 + (b * np.cos(theta)) ** 2) * x**2
+ (2 * (b**2 - a**2) * np.sin(theta) * np.cos(theta)) * x * y
+ ((a * np.cos(theta)) ** 2 + (b * np.sin(theta)) ** 2) * y**2
) < a**2 * b**2
binary_map = binary_map.reshape(resolution, resolution).astype(int)
contours, hierarchy = cv2.findContours(binary_map, mode=cv2.RETR_FLOODFILL, method=cv2.CHAIN_APPROX_SIMPLE)
parent_indices = np.flatnonzero(hierarchy[..., 3] == -1)
contours = [np.squeeze(contours[i]) for i in parent_indices]
final_contour = contours[0] if not centered else contours[0] - resolution / 2
return final_contour.astype(np.float32)
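# Editor's note: illustrative numerical check added by the editor; not from the repository.
# The rotated-ellipse inequality used in binary_map above can be sanity-checked directly:
# the rotated end of the major axis, (a*cos(theta), a*sin(theta)), should lie exactly on the
# boundary, i.e. the quadratic form should equal a**2 * b**2 there. The values below are arbitrary.
import numpy as np

a, b, theta = 5.0, 1.0, 0.3
x, y = a * np.cos(theta), a * np.sin(theta)
lhs = (
    ((a * np.sin(theta)) ** 2 + (b * np.cos(theta)) ** 2) * x ** 2
    + 2 * (b ** 2 - a ** 2) * np.sin(theta) * np.cos(theta) * x * y
    + ((a * np.cos(theta)) ** 2 + (b * np.sin(theta)) ** 2) * y ** 2
)
assert np.isclose(lhs, a ** 2 * b ** 2)  # the point lies on the ellipse boundary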
# Path: tests/unit_tests/utils/test_math.py
import math
import cv2
import numpy as np
import pytest
from iris.utils.math import (
apply_weights_1d,
area,
cartesian2polar,
eccentricity,
estimate_diameter,
orientation,
polar2cartesian,
polygon_length,
)
from tests.unit_tests.utils import generate_arc, generate_multiple_arcs, rotated_elliptical_contour
],
)
def test_orientation(input_contour: np.ndarray, expected_eye_orientation: float) -> None:
moments = cv2.moments(input_contour)
computed_eye_orientation = orientation(moments)
assert np.abs(computed_eye_orientation - expected_eye_orientation) < 1 / 360
@pytest.mark.parametrize(
"input_contour,expected_eccentricity",
[
(rotated_elliptical_contour(a=5, b=1, theta=-np.pi / 2), 0.838),
(rotated_elliptical_contour(a=5, b=1, theta=0), 0.838),
(rotated_elliptical_contour(a=5, b=1, theta=0.142857), 0.838),
(rotated_elliptical_contour(a=1, b=1, theta=0), 0),
(rotated_elliptical_contour(a=1e20, b=1, theta=0), 0.964),
(np.array([[0, 0], [0, 1], [1e-6, 0.5]]), 1),
(np.array([[0, 0], [0, 1]]), 1),
],
ids=[
"Same ellipse various angles (1/3)",
"Same ellipse various angles (2/3)",
"Same ellipse various angles (3/3)",
"circle",
"almost line",
"even more almost line",
"perfect line",
],
)
def test_eccentricity(input_contour: np.ndarray, expected_eccentricity: float) -> None:
moments = cv2.moments(input_contour)
computed_eccentricity = eccentricity(moments)
assert np.abs(computed_eccentricity - expected_eccentricity) < 1e-3
@pytest.mark.parametrize(
"scores_1d,weights_1d,expected_weighted_score",
[
([0, 1], [0.5, 0.5], 0.5),
([4, 4, 3], [1, 5, 4], 3.6),
([1], [0.1], 1),
([0, 0], [10, 1], 0),
([0.3, 0.21, 0.66], [0.4, 0.6, 0.11], 0.287027027027027),
],
)
def test_apply_weights_1d(
scores_1d: np.ndarray,
weights_1d: np.ndarray,
expected_weighted_score: float,
) -> None:
weighted_score = apply_weights_1d(scores_1d, weights_1d)
assert np.abs(weighted_score - expected_weighted_score) < 1e-6
@pytest.mark.parametrize(
"scores_1d,weights_1d",
[
([0, 1, 1], [0.5, 0.5]),
([2, 3, 4, 5], [0.5, 0.5]),
([1, 10], [1, 2, 3]),
([1], [0, 2]),
],
)
def test_apply_weights_1d_fails(scores_1d: np.ndarray, weights_1d: np.ndarray) -> None:
with pytest.raises(ValueError):
_ = apply_weights_1d(scores_1d, weights_1d)
@pytest.mark.parametrize(
"mock_polygon,max_point_distance,expected_length",
[
(np.array([[0, 0], [0, 0], [0, 0]]), 20, 0),
(np.array([[0, 0], [0, 1], [1, 1], [1, 0]]), 20, 4),
(np.array([[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]), 20, 4),
(generate_arc(1000, 10, 30, 0, 2 * np.pi, num_points=100000), 20, 2 * np.pi * 1000),
(
generate_multiple_arcs(
[
{
"radius": 1000,
"center_x": 0,
"center_y": 0,
"from_angle": 3 * np.pi / 4,
"to_angle": np.pi / 4,
"num_points": 25000,
},
{
"radius": 1000,
"center_x": 0,
"center_y": 0,
"from_angle": -np.pi / 4,
"to_angle": -3 * np.pi / 4,
"num_points": 25000,
},
]
),
100,
np.pi * 1000,
),
(np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 4], [1, 4], [1, 5], [0, 5]]), 4, 9),
(np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 4], [1, 4], [1, 5], [0, 5]]), 2, 6),
],
ids=[
"Empty polygon",
"Non-looping square",
"Looping square",
"Large circle",
"Two quarters of circle",
"Two squares separated by a distance below threshold",
"Two squares separated by a distance above threshold",
],
)
def test_polygon_length(mock_polygon: np.ndarray, max_point_distance: int, expected_length: float) -> None:
computed_length = polygon_length(mock_polygon, max_point_distance=max_point_distance)
assert math.isclose(computed_length, expected_length, rel_tol=1e-3)
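# Hypothetical reference implementation (an assumption inferred from the test cases
# above, NOT the library code): polygon_length() appears to sum the distances between
# consecutive vertices of the closed polygon while skipping any segment longer than
# max_point_distance (treating such jumps as separate pieces).
def _polygon_length_sketch(polygon: np.ndarray, max_point_distance: int = 20) -> float:
    closed = np.vstack([polygon, polygon[:1]])                  # close the loop
    segments = np.linalg.norm(np.diff(closed, axis=0), axis=1)  # per-edge lengths
    return float(segments[segments <= max_point_distance].sum())  # drop long "jumps"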
@pytest.mark.parametrize(
| "mock_polygon", |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: DiffusionLight/DiffusionLight
# Path: relighting/inpainter.py
class BallInpainter():
def __init__(self, pipeline, sd_arch, control_generator, disable_water_mask=True):
self.pipeline = pipeline
self.sd_arch = sd_arch
self.control_generator = control_generator
self.median = {}
if disable_water_mask:
self._disable_water_mask()
def _disable_water_mask(self):
if hasattr(self.pipeline, "watermark"):
self.pipeline.watermark = NoWaterMark()
print("Disabled watermasking")
@classmethod
def from_sd(cls,
model,
controlnet=None,
device=0,
sampler="unipc",
torch_dtype=torch.float16,
disable_water_mask=True,
offload=False
):
if controlnet is not None:
control_signal_type = get_control_signal_type(controlnet)
controlnet = ControlNetModel.from_pretrained(controlnet, torch_dtype=torch.float16)
pipe = CustomStableDiffusionControlNetInpaintPipeline.from_pretrained(
model,
controlnet=controlnet,
torch_dtype=torch_dtype,
).to(device)
control_generator = ControlSignalGenerator("sd", control_signal_type, device=device)
else:
pipe = CustomStableDiffusionInpaintPipeline.from_pretrained(
model,
torch_dtype=torch_dtype,
).to(device)
control_generator = None
try:
if torch_dtype==torch.float16 and device != torch.device("cpu"):
pipe.enable_xformers_memory_efficient_attention()
except:
pass
pipe.set_progress_bar_config(disable=True)
pipe.scheduler = SAMPLERS[sampler].from_config(pipe.scheduler.config)
return BallInpainter(pipe, "sd", control_generator, disable_water_mask)
@classmethod
def from_sdxl(cls,
model,
controlnet=None,
device=0,
sampler="unipc",
torch_dtype=torch.float16,
disable_water_mask=True,
use_fixed_vae=True,
offload=False
):
vae = VAE_MODELS["sdxl"]
vae = AutoencoderKL.from_pretrained(vae, torch_dtype=torch_dtype).to(device) if use_fixed_vae else None
extra_kwargs = {"vae": vae} if vae is not None else {}
if controlnet is not None:
control_signal_type = get_control_signal_type(controlnet)
controlnet = ControlNetModel.from_pretrained(
controlnet,
variant="fp16" if torch_dtype == torch.float16 else None,
use_safetensors=True,
torch_dtype=torch_dtype,
).to(device)
pipe = CustomStableDiffusionXLControlNetInpaintPipeline.from_pretrained(
model,
controlnet=controlnet,
variant="fp16" if torch_dtype == torch.float16 else None,
use_safetensors=True,
torch_dtype=torch_dtype,
**extra_kwargs,
).to(device)
control_generator = ControlSignalGenerator("sdxl", control_signal_type, device=device)
else:
pipe = CustomStableDiffusionXLInpaintPipeline.from_pretrained(
model,
variant="fp16" if torch_dtype == torch.float16 else None,
use_safetensors=True,
torch_dtype=torch_dtype,
**extra_kwargs,
).to(device)
control_generator = None
try:
if torch_dtype==torch.float16 and device != torch.device("cpu"):
pipe.enable_xformers_memory_efficient_attention()
except:
pass
if offload and device != torch.device("cpu"):
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=True)
pipe.scheduler = SAMPLERS[sampler].from_config(pipe.scheduler.config)
return BallInpainter(pipe, "sdxl", control_generator, disable_water_mask)
# TODO: this method should be replaced by inpaint(), but we'll leave it here for now
# otherwise, the existing experiment code will break down
def __call__(self, *args, **kwargs):
return self.pipeline(*args, **kwargs)
def _default_height_width(self, height=None, width=None):
if (height is not None) and (width is not None):
return height, width
if self.sd_arch == "sd":
return (512, 512)
elif self.sd_arch == "sdxl":
return (1024, 1024)
else:
raise NotImplementedError
# this method is for sanity check only
def get_cache_control_image(self):
control_image = getattr(self, "cache_control_image", None)
return control_image
def prepare_control_signal(self, image, controlnet_conditioning_scale, extra_kwargs):
if self.control_generator is not None:
control_image = self.control_generator(image, **extra_kwargs)
controlnet_kwargs = {
"control_image": control_image,
"controlnet_conditioning_scale": controlnet_conditioning_scale
}
self.cache_control_image = control_image
else:
controlnet_kwargs = {}
return controlnet_kwargs
def get_cache_median(self, it):
if it in self.median: return self.median[it]
else: return None
def reset_median(self):
self.median = {}
print("Reset median")
def load_median(self, path):
if os.path.exists(path):
with open(path, "rb") as f:
self.median = pickle.load(f)
print(f"Loaded median from {path}")
else:
print(f"Median not found at {path}!")
def inpaint_iterative(
self,
prompt=None,
negative_prompt="",
num_inference_steps=30,
generator=None, # TODO: remove this
image=None,
mask_image=None,
height=None,
width=None,
controlnet_conditioning_scale=0.5,
num_images_per_prompt=1,
current_seed=0,
cross_attention_kwargs={},
strength=0.8,
num_iteration=2,
ball_per_iteration=30,
agg_mode="median",
save_intermediate=True,
cache_dir="./temp_inpaint_iterative",
disable_progress=False,
prompt_embeds=None,
pooled_prompt_embeds=None,
use_cache_median=False,
**extra_kwargs,
):
def computeMedian(ball_images):
all = np.stack(ball_images, axis=0)
median = np.median(all, axis=0)
idx_median = np.argsort(all, axis=0)[all.shape[0]//2]
# print(all.shape)
# print(idx_median.shape)
return median, idx_median
def generate_balls(avg_image, current_strength, ball_per_iteration, current_iteration):
print(f"Inpainting balls for {current_iteration} iteration...")
controlnet_kwargs = self.prepare_control_signal(
image=avg_image,
controlnet_conditioning_scale=controlnet_conditioning_scale,
extra_kwargs=extra_kwargs,
)
ball_images = []
for i in tqdm(range(ball_per_iteration), disable=disable_progress):
seed = current_seed + i
new_generator = torch.Generator().manual_seed(seed)
output_image = self.pipeline(
prompt=prompt,
negative_prompt=negative_prompt,
num_inference_steps=num_inference_steps,
generator=new_generator,
image=avg_image,
mask_image=mask_image,
height=height,
width=width,
num_images_per_prompt=num_images_per_prompt,
strength=current_strength,
newx=x,
newy=y,
newr=r,
current_seed=seed,
cross_attention_kwargs=cross_attention_kwargs,
prompt_embeds=prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
**controlnet_kwargs
).images[0]
ball_image = crop_ball(output_image, mask_ball_for_crop, x, y, r)
ball_images.append(ball_image)
if save_intermediate:
os.makedirs(os.path.join(cache_dir, str(current_iteration)), mode=0o777, exist_ok=True)
output_image.save(os.path.join(cache_dir, str(current_iteration), f"raw_{i}.png"))
Image.fromarray(ball_image).save(os.path.join(cache_dir, str(current_iteration), f"ball_{i}.png"))
# chmod 777
os.chmod(os.path.join(cache_dir, str(current_iteration), f"raw_{i}.png"), 0o0777)
os.chmod(os.path.join(cache_dir, str(current_iteration), f"ball_{i}.png"), 0o0777)
return ball_images
if save_intermediate:
os.makedirs(cache_dir, exist_ok=True)
height, width = self._default_height_width(height, width)
x = extra_kwargs["x"]
y = extra_kwargs["y"]
r = 256 if "r" not in extra_kwargs else extra_kwargs["r"]
_, mask_ball_for_crop = get_ideal_normal_ball(size=r)
# generate initial average ball
avg_image = image
ball_images = generate_balls(
avg_image,
current_strength=1.0,
ball_per_iteration=ball_per_iteration,
current_iteration=0,
)
# ball refinement loop
image = np.array(image)
for it in range(1, num_iteration+1):
if use_cache_median and (self.get_cache_median(it) is not None):
print("Use existing median")
all = np.stack(ball_images, axis=0)
idx_median = self.get_cache_median(it)
avg_ball = all[idx_median,
np.arange(idx_median.shape[0])[:, np.newaxis, np.newaxis],
np.arange(idx_median.shape[1])[np.newaxis, :, np.newaxis],
np.arange(idx_median.shape[2])[np.newaxis, np.newaxis, :]
]
else:
avg_ball, idx_median = computeMedian(ball_images)
print("Add new median")
self.median[it] = idx_median
avg_image = merge_normal_map(image, avg_ball, mask_ball_for_crop, x, y)
avg_image = Image.fromarray(avg_image.astype(np.uint8))
if save_intermediate:
avg_image.save(os.path.join(cache_dir, f"average_{it}.png"))
# chmod777
os.chmod(os.path.join(cache_dir, f"average_{it}.png"), 0o0777)
ball_images = generate_balls(
avg_image,
current_strength=strength,
ball_per_iteration=ball_per_iteration if it < num_iteration else 1,
current_iteration=it,
)
# TODO: add algorithm for select the best ball
best_ball = ball_images[0]
output_image = merge_normal_map(image, best_ball, mask_ball_for_crop, x, y)
return Image.fromarray(output_image.astype(np.uint8))
def inpaint(
self,
prompt=None,
negative_prompt=None,
num_inference_steps=30,
generator=None,
image=None,
mask_image=None,
height=None,
width=None,
controlnet_conditioning_scale=0.5,
num_images_per_prompt=1,
strength=1.0,
current_seed=0,
cross_attention_kwargs={},
prompt_embeds=None,
pooled_prompt_embeds=None,
**extra_kwargs,
):
height, width = self._default_height_width(height, width)
controlnet_kwargs = self.prepare_control_signal(
image=image,
controlnet_conditioning_scale=controlnet_conditioning_scale,
extra_kwargs=extra_kwargs,
)
if generator is None:
generator = torch.Generator().manual_seed(0)
output_image = self.pipeline(
prompt=prompt,
negative_prompt=negative_prompt,
num_inference_steps=num_inference_steps,
generator=generator,
image=image,
mask_image=mask_image,
height=height,
width=width,
num_images_per_prompt=num_images_per_prompt,
strength=strength,
newx = extra_kwargs["x"],
newy = extra_kwargs["y"],
newr = getattr(extra_kwargs, "r", 256), # default to ball_size = 256
current_seed=current_seed,
cross_attention_kwargs=cross_attention_kwargs,
prompt_embeds=prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
**controlnet_kwargs
)
return output_image
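# Illustrative usage sketch (an assumption, not repository code): a minimal driver
# showing how the classmethods above fit together. The model/controlnet identifiers
# mirror SD_MODELS / CONTROLNET_MODELS defined further below; `image`, `mask`, `x`,
# `y` are placeholders prepared elsewhere (e.g. by MaskGenerator).
def _demo_chrome_ball(image, mask, x, y, r=256):
    inpainter = BallInpainter.from_sdxl(
        model="stabilityai/stable-diffusion-xl-base-1.0",
        controlnet="diffusers/controlnet-depth-sdxl-1.0",
        device=0,
    )
    # iterative inpainting; x/y/r locate the chrome-ball crop in the image
    return inpainter.inpaint_iterative(
        prompt="a perfect mirrored reflective chrome ball sphere",
        image=image, mask_image=mask, x=x, y=y, r=r,
    )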
# Path: relighting/mask_utils.py
class MaskGenerator():
def __init__(self, cache_mask=True):
self.cache_mask = cache_mask
self.all_masks = []
def clear_cache(self):
self.all_masks = []
def retrieve_masks(self):
return self.all_masks
def generate_grid(self, image, mask_ball, n_ball=16, size=128):
ball_positions = create_grid(image.size, n_ball, size)
# _, mask_ball = get_normal_ball(size)
masks = []
mask_template = np.zeros(image.size)
for x, y in ball_positions:
mask = mask_template.copy()
mask[y:y+size, x:x+size] = 255 * mask_ball
mask = Image.fromarray(mask.astype(np.uint8), "L")
masks.append(mask)
# if self.cache_mask:
# self.all_masks.append((x, y, size))
return masks, ball_positions
def generate_single(self, image, mask_ball, x, y, size):
w,h = image.size # numpy as (h,w) but PIL is (w,h)
mask = np.zeros((h,w))
mask[y:y+size, x:x+size] = 255 * mask_ball
mask = Image.fromarray(mask.astype(np.uint8), "L")
return mask
def generate_best(self, image, mask_ball, size):
w,h = image.size # numpy as (h,w) but PIL is (w,h)
mask = np.zeros((h,w))
(y, x), _ = find_best_location(np.array(image), ball_size=size)
mask[y:y+size, x:x+size] = 255 * mask_ball
mask = Image.fromarray(mask.astype(np.uint8), "L")
return mask, (x, y)
# Path: relighting/ball_processor.py
def get_ideal_normal_ball(size, flip_x=True):
"""
Generate normal ball for specific size
Normal map is x "left", y up, z into the screen
(we flip X to match sobel operator)
@params
- size (int) - single value of height and width
@return:
- normal_map (np.array) - normal map [size, size, 3]
- mask (np.array) - mask that make a valid normal map [size,size]
"""
# we flip x to match sobel operator
x = torch.linspace(1, -1, size)
y = torch.linspace(1, -1, size)
x = x.flip(dims=(-1,)) if not flip_x else x
y, x = torch.meshgrid(y, x)
z = (1 - x**2 - y**2)
mask = z >= 0
    # clean up invalid values outside the mask
x = x * mask
y = y * mask
z = z * mask
# get real z value
z = torch.sqrt(z)
# clean up normal map value outside mask
normal_map = torch.cat([x[..., None], y[..., None], z[..., None]], dim=-1)
normal_map = normal_map.numpy()
mask = mask.numpy()
return normal_map, mask
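# Quick sanity-check sketch (illustrative addition, not repo code; assumes numpy is
# available as np, as elsewhere in this file): inside the valid mask the generated
# normals should have unit length, since z = sqrt(1 - x^2 - y^2) there.
def _check_unit_normals(size: int = 64) -> bool:
    normal_map, mask = get_ideal_normal_ball(size)
    norms = np.linalg.norm(normal_map[mask], axis=-1)
    return bool(np.allclose(norms, 1.0, atol=1e-5))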
# Path: relighting/ball_processor.py
def crop_ball(image, mask_ball, x, y, size, apply_mask=True, bg_color = (0, 0, 0)):
if isinstance(image, Image.Image):
result = np.array(image)
else:
result = image.copy()
result = result[y:y+size, x:x+size]
if apply_mask:
result[~mask_ball] = bg_color
return result
# Path: relighting/dataset.py
class GeneralLoader(Dataset):
def __init__(self,
root=None,
num_samples=None,
res_threshold=((1024, 1024)),
apply_threshold=False,
random_shuffle=False,
process_id = 0,
process_total = 1,
limit_input = 0,
**kwargs,
):
super().__init__(**kwargs)
self.root = root
self.res_threshold = res_threshold
self.apply_threshold = apply_threshold
self.has_meta = False
if self.root is not None:
if not os.path.exists(self.root):
raise Exception(f"Dataset {self.root} does not exist.")
paths = natsorted(
list(glob.glob(os.path.join(self.root, "*.png"))) + \
list(glob.glob(os.path.join(self.root, "*.jpg")))
)
self.scene_data = self._load_data_path(paths, num_samples=num_samples)
if random_shuffle:
SEED = 0
random.Random(SEED).shuffle(self.scene_data)
random.Random(SEED).shuffle(self.boundary_info)
if limit_input > 0:
self.scene_data = self.scene_data[:limit_input]
self.boundary_info = self.boundary_info[:limit_input]
# please keep this one the last, so, we will filter out scene_data and boundary info
if process_total > 1:
self.scene_data = self.scene_data[process_id::process_total]
self.boundary_info = self.boundary_info[process_id::process_total]
print(f"Process {process_id} has {len(self.scene_data)} samples")
def _load_data_path(self, paths, num_samples=None):
if os.path.exists(os.path.splitext(paths[0])[0] + ".json") or os.path.exists(os.path.splitext(paths[-1])[0] + ".json"):
self.has_meta = True
if self.has_meta:
# read metadata
TARGET_KEY = "chrome_mask256"
for path in paths:
with open(os.path.splitext(path)[0] + ".json") as f:
meta = json.load(f)
self.meta_data.append(meta)
boundary = {
"x": meta[TARGET_KEY]["x"],
"y": meta[TARGET_KEY]["y"],
"size": meta[TARGET_KEY]["w"],
}
self.boundary_info.append(boundary)
scene_data = paths
if self.apply_threshold:
scene_data = []
for path in tqdm(paths):
img = Image.open(path)
if (img.size[0] >= self.res_threshold[0]) and (img.size[1] >= self.res_threshold[1]):
scene_data.append(path)
if num_samples is not None:
max_idx = min(num_samples, len(scene_data))
scene_data = scene_data[:max_idx]
return scene_data
@classmethod
def from_image_paths(cls, paths, *args, **kwargs):
dataset = cls(*args, **kwargs)
dataset.scene_data = dataset._load_data_path(paths)
return dataset
# Path: relighting/utils.py
def name2hash(name: str):
"""
@see https://stackoverflow.com/questions/16008670/how-to-hash-a-string-into-8-digits
"""
hash_number = int(hashlib.sha1(name.encode("utf-8")).hexdigest(), 16) % (10 ** 8)
return hash_number
# Path: relighting/argument.py
SD_MODELS = {
"sd15_old": "runwayml/stable-diffusion-inpainting",
"sd15_new": "runwayml/stable-diffusion-inpainting",
"sd21": "stabilityai/stable-diffusion-2-inpainting",
"sdxl": "stabilityai/stable-diffusion-xl-base-1.0",
"sdxl_fast": "stabilityai/stable-diffusion-xl-base-1.0",
"sd15_depth": "runwayml/stable-diffusion-inpainting",
}
# Path: relighting/argument.py
CONTROLNET_MODELS = {
"sd15_old": "fusing/stable-diffusion-v1-5-controlnet-normal",
"sd15_new": "lllyasviel/control_v11p_sd15_normalbae",
"sd21": "thibaud/controlnet-sd21-normalbae-diffusers",
"sdxl": "diffusers/controlnet-depth-sdxl-1.0",
"sdxl_fast": "diffusers/controlnet-depth-sdxl-1.0-small",
"sd15_depth": "lllyasviel/control_v11f1p_sd15_depth",
}
# Path: relighting/argument.py
VAE_MODELS = {
"sdxl": "madebyollin/sdxl-vae-fp16-fix",
"sdxl_fast": "madebyollin/sdxl-vae-fp16-fix",
}
# Path: inpaint.py
import torch
import argparse
import numpy as np
import torch.distributed as dist
import os
import json
import relighting.dist_utils as dist_util
import time
from PIL import Image
from tqdm.auto import tqdm
from relighting.inpainter import BallInpainter
from relighting.mask_utils import MaskGenerator
from relighting.ball_processor import (
get_ideal_normal_ball,
crop_ball
)
from relighting.dataset import GeneralLoader
from relighting.utils import name2hash
from relighting.argument import (
SD_MODELS,
CONTROLNET_MODELS,
VAE_MODELS
)
# inpaint the ball on an image
# this one is design for general image that does not require special location to place
# cross import from inpaint_multi-illum.py
def create_argparser():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str, required=True ,help='directory that contain the image') #dataset name or directory
parser.add_argument("--ball_size", type=int, default=256, help="size of the ball in pixel")
parser.add_argument("--ball_dilate", type=int, default=20, help="How much pixel to dilate the ball to make a sharper edge")
parser.add_argument("--prompt", type=str, default="a perfect mirrored reflective chrome ball sphere")
parser.add_argument("--prompt_dark", type=str, default="a perfect black dark mirrored reflective chrome ball sphere")
parser.add_argument("--negative_prompt", type=str, default="matte, diffuse, flat, dull")
parser.add_argument("--model_option", default="sdxl", help='selecting fancy model option (sd15_old, sd15_new, sd21, sdxl)') # [sd15_old, sd15_new, or sd21]
parser.add_argument("--output_dir", required=True, type=str, help="output directory")
parser.add_argument("--img_height", type=int, default=1024, help="Dataset Image Height")
parser.add_argument("--img_width", type=int, default=1024, help="Dataset Image Width")
# some good seed 0, 37, 71, 125, 140, 196, 307, 434, 485, 575 | 9021, 9166, 9560, 9814, but default auto is for fairness
parser.add_argument("--seed", default="auto", type=str, help="Seed: right now we use single seed instead to reduce the time, (Auto will use hash file name to generate seed)")
| parser.add_argument("--denoising_step", default=30, type=int, help="number of denoising step of diffusion model") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: eliphatfs/zerorf
# Path: lib/models/autoencoders/multiscene_nerf.py
class MultiSceneNeRF(BaseNeRF):
def __init__(self,
*args,
cache_size=0, # cache in RAM, top priority
cache_16bit=False,
num_file_writers=0, # cache in file system (for large dataset)
**kwargs):
super().__init__(*args, **kwargs)
self.cache_size = cache_size
self.cache_16bit = cache_16bit
if cache_size > 0:
rank, ws = get_dist_info()
split_points = np.round(np.linspace(0, cache_size, num=ws + 1)).astype(np.int64)
inds = np.arange(start=split_points[rank], stop=split_points[rank + 1])
self.cache = {ind: None for ind in inds}
else:
self.cache = None
self.cache_loaded = False
self.num_file_writers = num_file_writers
self.is_file_writers_initialized = False
def init_file_writers(self, save_dir):
if self.num_file_writers > 0:
def file_writer(queue):
while True:
obj = queue.get()
torch.save(obj, os.path.join(save_dir, obj['scene_name'] + '.pth'))
self.file_queues = [mp.Queue(maxsize=1) for _ in range(self.num_file_writers)]
for queue in self.file_queues:
p = mp.Process(target=file_writer, args=(queue,))
p.start()
else:
self.file_queues = None
self.is_file_writers_initialized = True
def load_cache(self, data, freeze_code=False):
device = get_module_device(self)
num_scenes = len(data['scene_id'])
rank, ws = get_dist_info()
if self.cache is not None:
if not self.cache_loaded:
cache_load_from = self.train_cfg.get('cache_load_from', None)
loaded = False
if cache_load_from is not None:
cache_files = os.listdir(cache_load_from)
cache_files.sort()
if len(cache_files) > 0:
assert len(cache_files) == self.cache_size
cacheiter = list(self.cache.keys())
if sys.stdout.isatty() and rank == 0:
cacheiter = tqdm.tqdm(cacheiter)
for ind in cacheiter:
self.cache[ind] = torch.load(
os.path.join(cache_load_from, cache_files[ind]), map_location='cpu')
loaded = True
if rank == 0:
mmcv.print_log('Loaded cache files from ' + cache_load_from + '.', 'mmgen')
if not loaded:
if rank == 0:
mmcv.print_log('Initialize codes from scratch.', 'mmgen')
self.cache_loaded = True
cache_list = [self.cache[scene_id_single] for scene_id_single in data['scene_id']]
elif 'code' in data:
cache_list = data['code']
else:
cache_list = [None for _ in range(num_scenes)]
code_list_ = []
density_grid = []
density_bitfield = []
for scene_state_single in cache_list:
if scene_state_single is None:
code_list_.append(self.get_init_code_(None, device))
density_grid.append(self.get_init_density_grid(None, device))
density_bitfield.append(self.get_init_density_bitfield(None, device))
else:
if 'code_' in scene_state_single['param']:
code_ = scene_state_single['param']['code_'].to(dtype=torch.float32, device=device)
else:
assert 'code' in scene_state_single['param']
if rank == 0:
warnings.warn(
'Pre-activation codes not found. Using on-the-fly inversion instead '
'(which could be inconsistent).')
code_ = self.code_activation.inverse(
scene_state_single['param']['code'].to(dtype=torch.float32, device=device))
code_list_.append(code_.requires_grad_(not freeze_code))
density_grid.append(
scene_state_single['param']['density_grid'].to(device)
if 'density_grid' in scene_state_single['param']
else self.get_init_density_grid(None, device))
density_bitfield.append(
scene_state_single['param']['density_bitfield'].to(device)
if 'density_bitfield' in scene_state_single['param']
else self.get_init_density_bitfield(None, device))
density_grid = torch.stack(density_grid, dim=0)
density_bitfield = torch.stack(density_bitfield, dim=0)
code_optimizers = self.build_optimizer(code_list_, self.train_cfg)
for ind, scene_state_single in enumerate(cache_list):
if scene_state_single is not None and 'optimizer' in scene_state_single:
optimizer_set_state(code_optimizers[ind], scene_state_single['optimizer'])
return code_list_, code_optimizers, density_grid, density_bitfield
def save_cache(self, code_list_, code_optimizers,
density_grid, density_bitfield, scene_id, scene_name):
if self.cache_16bit:
code_dtype = torch.float16 if code_list_[0].dtype == torch.float32 else code_list_[0].dtype
optimizer_dtype = torch.bfloat16
else:
code_dtype = code_list_[0].dtype
optimizer_dtype = torch.float32
if 'save_dir' in self.train_cfg:
save_dir = self.train_cfg['save_dir']
os.makedirs(save_dir, exist_ok=True)
if not self.is_file_writers_initialized:
self.init_file_writers(save_dir)
else:
save_dir = None
for ind, code_single_ in enumerate(code_list_):
scene_id_single = scene_id[ind]
out = dict(
scene_id=scene_id_single,
scene_name=scene_name[ind],
param=dict(
code_=code_single_.data,
density_grid=density_grid[ind],
density_bitfield=density_bitfield[ind]),
optimizer=code_optimizers[ind].state_dict())
if self.cache is not None:
if self.cache[scene_id_single] is None:
self.cache[scene_id_single] = out_dict_to(
out, device='cpu', code_dtype=code_dtype, optimizer_dtype=optimizer_dtype)
else:
if 'scene_id' not in self.cache[scene_id_single]:
self.cache[scene_id_single]['scene_id'] = out['scene_id']
if 'scene_name' not in self.cache[scene_id_single]:
self.cache[scene_id_single]['scene_name'] = out['scene_name']
if 'code' in self.cache[scene_id_single]['param']:
del self.cache[scene_id_single]['param']['code']
for key, val in out['param'].items():
load_tensor_to_dict(self.cache[scene_id_single]['param'], key, val,
device='cpu', dtype=code_dtype)
if 'optimizer' in self.cache[scene_id_single]:
optimizer_state_copy(out['optimizer'], self.cache[scene_id_single]['optimizer'],
device='cpu', dtype=optimizer_dtype)
else:
self.cache[scene_id_single]['optimizer'] = optimizer_state_to(
out['optimizer'], device='cpu', dtype=optimizer_dtype)
if save_dir is not None:
if self.file_queues is not None:
self.file_queues[ind // self.num_file_writers].put(
out_dict_to(out, device='cpu', code_dtype=code_dtype, optimizer_dtype=optimizer_dtype))
else:
torch.save(
out_dict_to(out, device='cpu', code_dtype=code_dtype, optimizer_dtype=optimizer_dtype),
os.path.join(save_dir, scene_name + '.pth'))
def train_step(self, data, optimizer, running_status=None):
code_list_, code_optimizers, density_grid, density_bitfield = self.load_cache(data)
# ==== optimize code ====
cond_imgs = data['cond_imgs'] # (num_scenes, num_imgs, h, w, 3)
cond_intrinsics = data['cond_intrinsics'] # (num_scenes, num_imgs, 4), in [fx, fy, cx, cy]
cond_poses = data['cond_poses']
cond_times = data.get('cond_times')
num_scenes, num_imgs, h, w, _ = cond_imgs.size()
# (num_scenes, num_imgs, h, w, 3)
cond_rays_o, cond_rays_d = get_cam_rays(cond_poses, cond_intrinsics, h, w)
dt_gamma_scale = self.train_cfg.get('dt_gamma_scale', 0.0)
# (num_scenes,)
dt_gamma = dt_gamma_scale / cond_intrinsics[..., :2].mean(dim=(-2, -1))
extra_scene_step = self.train_cfg.get('extra_scene_step', 0)
if extra_scene_step > 0:
cfg = self.train_cfg.copy()
cfg['n_inverse_steps'] = extra_scene_step
self.inverse_code(
self.decoder, cond_imgs, cond_rays_o, cond_rays_d, dt_gamma=dt_gamma, cfg=cfg,
code_=code_list_,
density_grid=density_grid,
density_bitfield=density_bitfield,
code_optimizer=code_optimizers)
# ==== joint optimization ====
for code_optimizer in code_optimizers:
code_optimizer.zero_grad()
optimizer['decoder'].zero_grad()
code = self.code_activation(torch.stack(code_list_, dim=0), update_stats=True)
loss, log_vars, out_rgbs, target_rgbs = self.loss_decoder(
self.decoder, code, density_bitfield, cond_rays_o, cond_rays_d,
cond_imgs, dt_gamma=dt_gamma, cond_times=cond_times, cfg=self.train_cfg,
update_extra_state=self.update_extra_iters,
extra_args=(density_grid, density_bitfield, 0),
extra_kwargs=dict(
density_thresh=self.train_cfg['density_thresh']
) if 'density_thresh' in self.train_cfg else dict())
loss.backward()
log_vars.update(loss=float(loss))
if self.train_cfg.get('decoder_grad_clip', 0.0) > 0.0:
decoder_grad_norm = torch.nn.utils.clip_grad_norm_(
self.decoder.parameters(), self.train_cfg['decoder_grad_clip'])
log_vars.update(decoder_grad_norm=float(decoder_grad_norm))
optimizer['decoder'].step()
for code_optimizer in code_optimizers:
code_optimizer.step()
# ==== save cache ====
self.save_cache(
code_list_, code_optimizers,
density_grid, density_bitfield, data['scene_id'], data['scene_name'])
# ==== evaluate reconstruction ====
with torch.no_grad():
self.mean_ema_update(code)
train_psnr = eval_psnr(out_rgbs, target_rgbs)
code_rms = code.square().flatten(1).mean().sqrt()
log_vars.update(train_psnr=float(train_psnr.mean()),
code_rms=float(code_rms.mean()))
if 'test_imgs' in data and data['test_imgs'] is not None:
log_vars.update(self.eval_and_viz(
data, self.decoder, code, density_bitfield, cfg=self.train_cfg))
# ==== outputs ====
outputs_dict = dict(
log_vars=log_vars, num_samples=num_scenes)
return outputs_dict
# Path: lib/core/optimizer/builder.py
def build_optimizers(model, cfgs):
"""Modified from MMGeneration
"""
optimizers = {}
if hasattr(model, 'module'):
model = model.module
# determine whether 'cfgs' has several dicts for optimizers
is_dict_of_dict = True
for key, cfg in cfgs.items():
if not isinstance(cfg, dict):
is_dict_of_dict = False
if is_dict_of_dict:
for key, cfg in cfgs.items():
cfg_ = cfg.copy()
module = rgetattr(model, key)
optimizers[key] = build_optimizer(module, cfg_)
return optimizers
return build_optimizer(model, cfgs)
# Path: lib/core/ssdnerf_gui.py
class OrbitCamera:
def __init__(self, name, W, H, r=2., fovy=60., euler=[0, 0, 0]):
self.name = name
self.W = W
self.H = H
self.radius = r # camera distance from center
self.fovy = fovy # in degree
self.center = np.array([0, 0, 0], dtype=np.float32) # look at this point
self.default_rot = R.from_quat([0.5, -0.5, 0.5, -0.5])
self.rot = copy.deepcopy(self.default_rot)
self.up = np.array([0, 0, 1], dtype=np.float32) # need to be normalized!
self.set_euler(euler)
# pose
@property
def pose(self):
# first move camera to radius
res = np.eye(4, dtype=np.float32)
res[2, 3] -= self.radius
# rotate
rot = np.eye(4, dtype=np.float32)
rot[:3, :3] = self.rot.as_matrix()
res = rot @ res
# translate
res[:3, 3] -= self.center
return res
def set_pose(self, pose):
self.rot = R.from_matrix(pose[:3, :3])
self.center = -pose[:3, 3] - self.rot.as_matrix()[:3, 2] * self.radius
@property
def intrinsics(self):
focal = self.H / (2 * np.tan(np.radians(self.fovy) / 2))
return np.array([focal, focal, self.W / 2, self.H / 2])
@property
def euler(self):
return (self.rot * self.default_rot.inv()).as_euler('xyz', degrees=True)
def set_euler(self, euler):
self.rot = R.from_euler('xyz', euler, degrees=True) * self.default_rot
def orbit(self, dx, dy):
# rotate along camera up/side axis!
side = self.rot.as_matrix()[:3, 0] # why this is side --> ? # already normalized.
rotvec_x = self.up * np.radians(-0.1 * dx)
rotvec_y = side * np.radians(-0.1 * dy)
self.rot = R.from_rotvec(rotvec_x) * R.from_rotvec(rotvec_y) * self.rot
def scale(self, delta):
self.radius *= 1.1 ** (-delta)
def pan(self, dx, dy, dz=0):
# pan in camera coordinate system (careful on the sensitivity!)
self.center += 0.0005 * self.rot.as_matrix()[:3, :3] @ np.array([dx, dy, dz])
def pose2str(self):
with np.printoptions(precision=3, suppress=True):
return str(self.pose)
# Path: lib/datasets/nerf_synthetic.py
class NerfSynthetic(Dataset):
def __init__(
self, meta_files: list, world_scale: float = 1.0, rgba: bool = False
) -> None:
super().__init__()
self.meta_files = meta_files
self.world_scale = world_scale
self.rgba = rgba
def __len__(self):
return len(self.meta_files)
def load_sub(self, sub):
with open(sub) as mf:
meta = json.load(mf)
frames_i = []
frames_p = []
frames_c = []
frames_t = []
for frame in range(len(meta['frames'])):
img = plotlib.imread(os.path.join(os.path.dirname(sub), meta['frames'][frame]['file_path'] + '.png'))
h, w, c = img.shape
x, y = w / 2, h / 2
focal_length = y / numpy.tan(meta['camera_angle_x'] / 2)
# scaling = 320.0 / img.shape[0]
scaling = 1.0
if not self.rgba:
img = img[..., :3] * img[..., 3:] + (1 - img[..., 3:])
# img = cv2.resize(img, [320, 320], interpolation=cv2.INTER_AREA)
pose = meta['frames'][frame]['transform_matrix']
frames_i.append(img)
frames_p.append((numpy.array(pose) @ BLENDER_TO_OPENCV_MATRIX) * self.world_scale)
frames_c.append(numpy.array([focal_length, focal_length, x, y]) * scaling)
if 'time' in meta['frames'][frame]:
frames_t.append(meta['frames'][frame]['time'])
f32 = numpy.float32
return dict(
cond_imgs=numpy.array(frames_i, f32),
cond_poses=numpy.array(frames_p, f32),
cond_intrinsics=numpy.array(frames_c, f32),
cond_times=numpy.array(frames_t, f32) * 2 - 1 if len(frames_t) else None
)
def __getitem__(self, index):
sub = self.meta_files[index]
return dict(
scene_id=DC(index, cpu_only=True),
scene_name=DC(sub, cpu_only=True),
**self.load_sub(sub)
)
# Path: lib/datasets/oppo.py
class OppoDataset(Dataset):
def __init__(
self, root_dir: str, split: str, world_scale: float = 1.0, rgba: bool = False
) -> None:
super().__init__()
self.root_dir = root_dir
self.world_scale = world_scale
self.rgba = rgba
self.split = split
self.downsample = 4.0
self.img_wh = (int(2656 / self.downsample), int(3984 / self.downsample))
self.define_transforms()
# self.scene_bbox = torch.tensor([[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]])
# self.near_far = [0.5, 1.5]
camera_file = os.path.join(self.root_dir, f"../../transforms_alignz_{split}.json")
with open(camera_file, 'r') as f:
self.meta = json.load(f)['frames']
self.poses = []
self.imgs = []
self.intrinsic = []
w, h = self.img_wh
for k, v in self.meta.items():
imgid = v['file_path'].split('/')[-1]
focal = 0.5 * v['calib_imgw'] / np.tan(0.5 * v['camera_angle_x']) # original focal length
if self.downsample != 1.0:
focal = focal / self.downsample
image_path = os.path.join(self.root_dir, f"../Lights/013/raw_undistorted/{imgid}.JPG")
c2w = np.array(v['transform_matrix'])
c2w = torch.FloatTensor(c2w)
self.poses.append(c2w)
self.intrinsic.append(torch.tensor([focal, focal, w / 2, h / 2])) # focal, focal, cx, cy
img = Image.open(image_path)
if self.downsample != 1.0:
img = img.resize(self.img_wh, Image.LANCZOS)
img = self.transform(img) # (4, h, w)
if self.split == 'train':
mask_path = os.path.join(self.root_dir, f"com_masks/{imgid}.png")
else:
# mask_path = os.path.join(self.root_dir, f"obj_masks/{imgid}.png")
mask_path = os.path.join(self.root_dir, f"com_masks/{imgid}.png")
mask = cv2.imread(mask_path, 2) > 0
if self.downsample != 1.0:
mask = cv2.resize(mask.astype(np.uint8), self.img_wh) > 0
mask = torch.from_numpy(mask).bool()
img = img.permute(1,2,0)
img = img * mask[...,None].float() + (1 - mask[...,None].float()) # blend A to RGB
if rgba:
img = torch.cat([img, mask[..., None]], dim=-1)
self.imgs += [img]
self.poses = torch.stack(self.poses, dim=0) * self.world_scale
# self.poses = transform_poses_pca(np.array(self.poses))
self.imgs = torch.stack(self.imgs, dim=0)
self.intrinsic = torch.stack(self.intrinsic, dim=0)
def define_transforms(self):
self.transform = T.ToTensor()
def __len__(self):
return 1
def __getitem__(self, index):
return dict(
scene_id=DC(index, cpu_only=True),
scene_name=DC(self.root_dir, cpu_only=True),
cond_imgs=np.array(self.imgs, np.float32),
cond_poses=np.array(self.poses, np.float32),
cond_intrinsics=np.array(self.intrinsic, np.float32)
)
# Path: opt.py
def config_parser(cmd=None):
parser = configargparse.ArgumentParser()
# experiment
parser.add_argument('--load-image', type=str, default=None,
help='zero123pp image path')
parser.add_argument("--proj-name", type=str, default="test",
help='experiment name')
parser.add_argument("--wandb-project", type=str,
default="zerorf", help='wandb project name')
# data
parser.add_argument("--dataset", type=str,
default="nerf_syn", help='type of dataset')
parser.add_argument("--data-dir", type=str,
default="/root/nerf_synthetic", help='directory of the dataset')
parser.add_argument("--obj", type=str,
default="chair", help='object name')
parser.add_argument("--n-views", type=int,
default=6, help='number of input views')
# model
parser.add_argument("--model-res", type=int,
default=20, help='noise resolution (should be about 1/40 the provided image resolution), ignored when load-image is set')
parser.add_argument("--model-ch", type=int,
default=8, help='noise channel')
parser.add_argument("--n-rays-init", type=int,
default=2**12, help='number of rays per batch initially')
parser.add_argument("--n-rays-up", type=int,
default=2**16, help='number of rays per batch after 100 iterations')
parser.add_argument("--learn-bg", action='store_true', help='if learn background')
parser.add_argument("--bg-color", type=float,
default=1.0, help='background color')
parser.add_argument("--rep", type=str, choices=['dif', 'tensorf'],
default="dif", help="representation to use")
# training
parser.add_argument("--net-lr", type=float,
default=0.002, help='learning rate')
parser.add_argument("--seed", type=int,
default=1337, help='random seed')
parser.add_argument("--n-val", type=int,
default=1, help='number of validate views')
parser.add_argument("--net-lr-decay-to", type=float,
default=0.002, help='lr decay rate')
parser.add_argument("--n-iters", type=int,
default=10000, help='number of iterations')
parser.add_argument("--val-iter", type=int,
default=1000, help='valid every k iterations')
parser.add_argument("--device", type=str,
default="cuda:0", help='device name')
if cmd is not None:
return parser.parse_args(cmd)
else:
return parser.parse_args()
# Path: zerorf.py
import sys
import shutil
import os
import cv2
import tqdm
import json
import numpy
import wandb
import torch
import torch_redstone as rst
import einops
from sklearn.cluster import KMeans
from lib.models.autoencoders import MultiSceneNeRF
from mmgen.models import build_model, build_module
from lib.core.optimizer import build_optimizers
from lib.core.ssdnerf_gui import OrbitCamera
from lib.datasets.nerf_synthetic import NerfSynthetic
from lib.datasets.oppo import OppoDataset
from PIL import Image
from opt import config_parser
from pprint import pprint
sys.path.append('.')
torch.backends.cuda.matmul.allow_tf32 = True
def kmeans_downsample(points, n_points_to_sample):
kmeans = KMeans(n_points_to_sample).fit(points)
return ((points - kmeans.cluster_centers_[..., None, :]) ** 2).sum(-1).argmin(-1).tolist()
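# Illustrative example (added, not part of the repo): pick a spread-out subset of
# camera positions, mirroring how cond_poses[..., :3, 3] is subsampled further below.
def _demo_kmeans_downsample() -> list:
    demo_positions = numpy.random.RandomState(0).randn(100, 3)  # 100 fake camera centers
    return kmeans_downsample(demo_positions, 6)  # 6 indices, one nearest point per cluster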
args = config_parser()
pprint(args)
model_scaling_factor = 16
device = args.device
BLENDER_TO_OPENCV_MATRIX = numpy.array([
[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1]
], dtype=numpy.float32)
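# (added note) This matrix flips the Y and Z axes of a camera-to-world pose, converting
# the Blender/OpenGL camera convention (y up, z backward) into the OpenCV convention
# (y down, z forward) expected by the ray-generation code.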
code_size = (3, args.model_ch, args.model_res, args.model_res)
rst.seed(args.seed)
poses = []
intrinsics = []
if args.load_image:
image = numpy.array(Image.open(args.load_image)).astype(numpy.float32) / 255.0
image = torch.tensor(image).cuda()
images = einops.rearrange(image, '(ph h) (pw w) c -> (ph pw) h w c', ph=3, pw=2)[None]
meta = json.load(open(os.path.join(os.path.dirname(__file__), "meta.json")))
poses = numpy.array([
(numpy.array(frame['transform_matrix']) @ BLENDER_TO_OPENCV_MATRIX) * 2
for frame in meta['sample_0']['view_frames']
])
_, b, h, w, c = images.shape
x, y = w / 2, h / 2
focal_length = y / numpy.tan(meta['fovy'] / 2)
intrinsics = numpy.array([[focal_length, focal_length, x, y]] * args.n_views)
work_dir = "results/%s" % args.proj_name
os.makedirs(work_dir, exist_ok=True)
os.chdir(work_dir)
if not args.load_image:
if args.dataset == "nerf_syn":
model_scale = dict(chair=2.1, drums=2.3, ficus=2.3, hotdog=3.0, lego=2.4, materials=2.4, mic=2.5, ship=2.75)
world_scale = 2 / model_scale[args.obj]
dataset = NerfSynthetic([f"{args.data_dir}/{args.obj}/transforms_train.json"], rgba=True, world_scale=world_scale)
val = NerfSynthetic([f"{args.data_dir}/{args.obj}/transforms_val.json"], world_scale=world_scale)
test = NerfSynthetic([f"{args.data_dir}/{args.obj}/transforms_test.json"], world_scale=world_scale)
entry = dataset[0]
selected_idxs = kmeans_downsample(entry['cond_poses'][..., :3, 3], args.n_views)
elif args.dataset == "oi":
world_scale = 5.0
dataset = OppoDataset(f"{args.data_dir}/{args.obj}/output", split='train', world_scale=world_scale, rgba=True)
val = OppoDataset(f"{args.data_dir}/{args.obj}/output", split='test', world_scale=world_scale)
test = OppoDataset(f"{args.data_dir}/{args.obj}/output", split='test', world_scale=world_scale)
entry = dataset[0]
if args.n_views == 6:
selected_idxs = [10, 3, 19, 22, 17, 35]
elif args.n_views == 4:
selected_idxs = [10, 33, 35, 6]
else:
selected_idxs = kmeans_downsample(entry['cond_poses'][..., :3, 3], args.n_views)
data_entry = dict(
cond_imgs=torch.tensor(entry['cond_imgs'][selected_idxs][None]).float().to(device),
cond_poses=torch.tensor(entry['cond_poses'])[selected_idxs][None].float().to(device),
cond_intrinsics=torch.tensor(entry['cond_intrinsics'])[selected_idxs][None].float().to(device),
scene_id=[0],
scene_name=[args.proj_name]
)
entry = val[0]
val_entry = dict(
test_imgs=torch.tensor(entry['cond_imgs'][:args.n_val][None]).float().to(device),
test_poses=torch.tensor(entry['cond_poses'][:args.n_val])[None].float().to(device),
test_intrinsics=torch.tensor(entry['cond_intrinsics'][:args.n_val])[None].float().to(device),
scene_id=[0],
scene_name=[args.proj_name]
)
entry = test[0]
test_entry = dict(
test_imgs=torch.tensor(entry['cond_imgs'][:][None]).float().to(device),
test_poses=torch.tensor(entry['cond_poses'][:])[None].float().to(device),
test_intrinsics=torch.tensor(entry['cond_intrinsics'][:])[None].float().to(device),
scene_id=[0],
scene_name=[args.proj_name]
)
| else: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: baidubce/app-builder
# Path: appbuilder/core/constants.py
GATEWAY_URL = "https://appbuilder.baidu.com"
# Path: appbuilder/core/constants.py
GATEWAY_INNER_URL = "http://appbuilder.sdns.baidu.com"
# Path: appbuilder/core/component.py
class Component:
r"""Component基类, 其它实现的Component子类需要继承该基类,并至少实现run方法."""
def __init__(self,
meta: Optional[ComponentArguments] = ComponentArguments(),
secret_key: Optional[str] = None,
gateway: str = ""
):
r"""Component初始化方法.
参数:
meta (obj: `ComponentArguments`, 可选) : component元信息.
secret_key(str,可选): 用户鉴权token, 默认从环境变量中获取: os.getenv("APPBUILDER_TOKEN", "").
gateway(str, 可选): 后端网关服务地址,默认从环境变量中获取: os.getenv("GATEWAY_URL", "")
返回:
无
"""
self.meta = meta
self.http_client = HTTPClient(secret_key, gateway)
def __call__(self, *inputs, **kwargs):
r"""implement __call__ method"""
return self.run(*inputs, **kwargs)
def run(self, *inputs, **kwargs):
r"""
Defines the computation performed at every call.
Should be overridden by all subclasses.
Parameters:
*inputs(tuple): unpacked tuple arguments
**kwargs(dict): unpacked dict arguments
"""
raise NotImplementedError
def batch(self, *args, **kwargs) -> List[Message]:
r"""pass"""
return None
async def arun(self, *args, **kwargs) -> Optional[Message]:
r"""pass"""
return None
async def abatch(self, *args, **kwargs) -> List[Message]:
r"""pass"""
return None
def _trace(self, **data) -> None:
r"""pass"""
pass
def _debug(self, **data) -> None:
r"""pass"""
pass
# Path: appbuilder/core/message.py
class Message(BaseModel, Generic[_T]):
content: Optional[_T] = {}
name: Optional[str] = "msg"
mtype: Optional[str] = "dict"
id: Optional[str] = str(uuid.uuid4())
def __init__(self, content: Optional[_T] = None, **data):
if content is not None:
data['content'] = content
super().__init__(**data)
self.mtype = type(self.content).__name__
def __str__(self):
return f"Message(name={self.name}, content={self.content}, mtype={self.mtype})"
def __repr__(self):
return f"{self.__class__.__name__}(name={self.name!r}, content={self.content!r}, mtype={self.mtype!r})"
# Path: appbuilder/core/message.py
_T = TypeVar("_T")
# Path: appbuilder/utils/logger_util.py
LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '[%(asctime)s.%(msecs)03d] %(filename)s [line:%(lineno)d] %(levelname)s [%(logid)s] %(message)s',
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'standard',
'stream': 'ext://sys.stdout', # Use standard output
},
},
'loggers': {
'appbuilder': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
}
}
class LoggerWithLoggerId(logging.LoggerAdapter):
def __init__(self, logger, extra, loglevel):
def set_auto_logid(self):
def set_logid(self, logid):
def get_logid(self):
def level(self):
def process(self, msg, kwargs):
def _setup_logging():
# Path: appbuilder/core/component.py
class ComponentArguments(BaseModel):
r""""ComponentArguments define Component meta fields"""
name: str = ""
tool_desc: Dict[str, Any] = {}
def extract_values_to_dict(self):
r"""extract ComponentArguments fields to dict"""
inputs = {}
for field_name, field in self.__fields__.items():
value = getattr(self, field_name)
            # get the display_name metadata
variable_name = field.field_info.extra.get('variable_name')
if variable_name:
                # use the actual value of the Enum member
if isinstance(value, Message):
inputs[variable_name] = str(value.content)
elif isinstance(value, Enum):
inputs[variable_name] = str(value.value)
else:
inputs[variable_name] = str(value)
else:
inputs[field_name] = value
return inputs
# Path: appbuilder/core/_exception.py
class AppBuilderServerException(BaseRPCException):
r"""AppBuilderServerException represent backend server failed response.
"""
def __init__(self, request_id="", code="", message="", service_err_code="", service_err_message=""):
r"""__init__ a AppBuilderServerException instance.
:param request_id: str, request unique id.
:param code: str, backend .
:rtype:
"""
super().__init__("request_id={}, code={}, message={}, service_err_code={}, service_err_message={} ".format(
request_id, code, message, service_err_code, service_err_message))
# Path: appbuilder/core/utils.py
class ModelInfo:
""" 模型信息类 """
def __init__(self, client: HTTPClient):
"""根据模型名称获取并初始化模型信息"""
self.client = client
response = Models(client).list()
self.model_list = [*response.result.common, *response.result.custom]
def get_model_url(self, model_name: str) -> str:
"""获取模型在工作台网关的请求url"""
origin_name = model_name
for key, value in model_name_mapping.items():
if origin_name == value:
origin_name = key
break
for model in self.model_list:
if model.name == origin_name:
return convert_cloudhub_url(self.client, model.url)
raise ModelNotSupportedException(f"Model[{model_name}] not available! "
f"You can query available models through: appbuilder.get_model_list()")
def get_model_type(self, model_name: str) -> str:
"""获取模型类型"""
origin_name = model_name
for key, value in model_name_mapping.items():
if origin_name == value:
origin_name = key
break
for model in self.model_list:
if model.name == origin_name:
return model.apiType
raise ModelNotSupportedException(f"Model[{model_name}] not available! "
f"You can query available models through: appbuilder.get_model_list()")
# Path: appbuilder/utils/sse_util.py
class SSEClient:
"""
    A simple SSE client for receiving SSE events sent by the server.
"""
def __init__(self, event_source, char_enc='utf-8'):
"""
        Initialize the SSE client from an existing event source.
        The event source should be a binary stream with a close() method.
        This is usually something implementing io.BinaryIOBase, such as an httplib or urllib3 HTTPResponse object.
"""
logging.info(f'Initialized SSE client from event source {event_source}')
self._event_source = event_source
self._char_enc = char_enc
def _read(self):
"""
        Read the incoming event source stream and yield event chunks.
        Unfortunately, some servers may decide to split an event across multiple HTTP chunks in the response.
        It is therefore necessary to stitch consecutive response chunks together and look for the SSE delimiter (an empty new line) to yield complete, correct event chunks.
"""
data = b''
for chunk in self._event_source:
for line in chunk.splitlines(True):
data += line
if data.endswith((b'\r\r', b'\n\n', b'\r\n\r\n')):
yield data
data = b''
if data:
yield data
def events(self):
"""
        Read Server-Side-Event (SSE) data from the given input stream and yield parsed Event objects.
        Args:
            None
        Returns:
            generator: a generator of parsed Event objects.
"""
for chunk in self._read():
event = Event()
# Split before decoding so splitlines() only uses \r and \n
for line in chunk.splitlines():
# Decode the line.
line = line.decode(self._char_enc)
# Lines starting with a separator are comments and are to be
# ignored.
if not line.strip() or line.startswith(':'):
continue
logging.debug(f"raw line: {line}")
data = line.split(':', 1)
field = data[0]
# Ignore unknown fields.
if field not in event.__dict__:
logging.info(f'Saw invalid field {field} while parsing Server Side Event')
continue
if len(data) > 1:
# From the spec:
# "If value starts with a single U+0020 SPACE character,
# remove it from value."
if data[1].startswith(' '):
value = data[1][1:]
else:
value = data[1]
else:
# If no value is present after the separator,
# assume an empty value.
value = ''
# The data field may come over multiple lines and their values
# are concatenated with each other.
if field == 'data':
event.__dict__[field] += value + '\n'
else:
event.__dict__[field] = value
# Events with no data are not dispatched.
if not event.data:
continue
# If the data field ends with a newline, remove it.
if event.data.endswith('\n'):
event.data = event.data[0:-1]
# Empty event names default to 'message'
event.event = event.event or 'message'
# Dispatch the event
logging.info(f'Dispatching {event}...')
yield event
def close(self):
"""
        Manually close the event source stream.
"""
self._event_source.close()
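# Illustrative sketch (added, not repository code): what the parser above yields for a
# typical SSE payload: consecutive "data:" lines are concatenated (newline-joined) and
# dispatched as a single event once the blank-line terminator is seen.
def _demo_sse_parse():
    raw = [b'event: message\ndata: {"answer": "hel"}\ndata: {"answer": "lo"}\n\n']
    client = SSEClient(iter(raw))
    return [(event.event, event.data) for event in client.events()]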
# Path: appbuilder/core/components/llms/base.py
import itertools
import json
import uuid
import requests
from enum import Enum
from appbuilder.core.constants import GATEWAY_URL, GATEWAY_INNER_URL
from pydantic import BaseModel, Field, ValidationError, HttpUrl, validator
from pydantic.types import confloat
from appbuilder.core.component import Component
from appbuilder.core.message import Message, _T
from appbuilder.utils.logger_util import logger
from typing import Dict, List, Optional, Any
from appbuilder.core.component import ComponentArguments
from appbuilder.core._exception import AppBuilderServerException
from appbuilder.core.utils import ModelInfo
from appbuilder.utils.sse_util import SSEClient
from collections.abc import Generator
}
}
def __init__(self, meta: ComponentArguments, model=None, secret_key: Optional[str] = None,
gateway: str = ""):
"""
Args:
            meta (ComponentArguments): component argument information
            model (str, optional): model name. Defaults to None.
            secret_key (Optional[str], optional): optional secret key. Defaults to None.
            gateway (str, optional): gateway address. Defaults to "".
"""
super().__init__(meta=meta, secret_key=secret_key, gateway=gateway)
if not self.__class__.model_info:
self.__class__.model_info = ModelInfo(client=self.http_client)
self.model_url = self.model_info.get_model_url(model)
self.model_name = model
if not self.model_name and not self.model_url:
raise ValueError("model_name or model_url must be provided")
m_type = self.model_info.get_model_type(model)
if m_type != self.model_type:
raise ModelNotSupportedException(f"Model {model} with type [{m_type}] not supported, only support {self.model_type} type")
self.version = self.version
def gene_request(self, query, inputs, response_mode, message_id, model_config):
""""send request"""
data = {
"query": query,
"inputs": inputs,
"response_mode": response_mode,
"user": message_id,
"model_config": model_config
}
request = CompletionRequest(data, response_mode)
return request
def gene_response(self, response, stream: bool = False):
"""generate response"""
response = CompletionResponse(response, stream)
return response
def run(self, *args, **kwargs):
"""
Run the model with given input and return the result.
Args:
**kwargs: Keyword arguments for both StyleWritingComponent and common component inputs.
Returns:
obj:`Message`: Output message after running model.
"""
specific_params = {k: v for k, v in kwargs.items() if k in self.meta.__fields__}
model_config_params = {k: v for k, v in kwargs.items() if k in ModelArgsConfig.__fields__}
try:
specific_inputs = self.meta(**specific_params)
model_config_inputs = ModelArgsConfig(**model_config_params)
except ValidationError as e:
raise ValueError(e)
query, inputs, response_mode, user_id = self.get_compeliton_params(specific_inputs, model_config_inputs)
model_config = self.get_model_config(model_config_inputs)
request = self.gene_request(query, inputs, response_mode, user_id, model_config)
response = self.completion(self.version, self.base_url, request)
if response.error_no != 0:
raise AppBuilderServerException(service_err_code=response.error_no, service_err_message=response.error_msg)
return response.to_message()
def get_compeliton_params(self, specific_inputs, model_config_inputs):
"""获取模型请求参数"""
inputs = specific_inputs.extract_values_to_dict()
query = inputs["query"]
user_id = str(uuid.uuid4())
if model_config_inputs.stream:
response_mode = "streaming"
else:
response_mode = "blocking"
return query, inputs, response_mode, user_id
def get_model_config(self, model_config_inputs):
"""获取模型配置信息"""
if self.model_url:
self.model_config["model"]["url"] = self.model_url
if self.model_name:
self.model_config["model"]["name"] = self.model_name
self.model_config["model"]["completion_params"]["temperature"] = model_config_inputs.temperature
self.model_config["model"]["completion_params"]["top_p"] = model_config_inputs.top_p
return self.model_config
def completion(self, version, base_url, request: CompletionRequest, timeout: float = None,
retry: int = 0) -> CompletionResponse:
r"""Send a byte array of an audio file to obtain the result of speech recognition."""
headers = self.http_client.auth_header()
headers["Content-Type"] = "application/json"
completion_url = "/" + self.version + "/api/llm/" + self.name
stream = True if request.response_mode == "streaming" else False
url = self.http_client.service_url(completion_url, self.base_url)
logger.debug(
"request url: {}, method: {}, json: {}, headers: {}".format(url,
"POST",
| request.params, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: corfyi/UCMCTrack
# Path: detector/detector.py
class Detector:
def __init__(self):
self.seq_length = 0
self.gmc = None
def load(self,cam_para_file, det_file, gmc_file = None):
self.mapper = Mapper(cam_para_file,"MOT17")
self.load_detfile(det_file)
if gmc_file is not None:
self.gmc = GMCLoader(gmc_file)
def load_detfile(self, filename):
self.dets = dict()
        # open the text file filename
with open(filename, 'r') as f:
            # read every line in the file
for line in f.readlines():
                # split each line's content on commas
line = line.strip().split(',')
frame_id = int(line[0])
if frame_id > self.seq_length:
self.seq_length = frame_id
det_id = int(line[1])
                # create a new Detection object
det = Detection(det_id)
det.bb_left = float(line[2])
det.bb_top = float(line[3])
det.bb_width = float(line[4])
det.bb_height = float(line[5])
det.conf = float(line[6])
det.det_class = int(line[7])
if det.det_class == -1:
det.det_class = 0
det.y,det.R = self.mapper.mapto([det.bb_left,det.bb_top,det.bb_width,det.bb_height])
                # add det to the dictionary
if frame_id not in self.dets:
self.dets[frame_id] = []
self.dets[frame_id].append(det)
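        # (added note) Each line of the detection file is parsed above as comma-separated
        # values: frame_id, det_id, bb_left, bb_top, bb_width, bb_height, conf, det_class
        # (a MOT-challenge-style det.txt layout).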
def get_dets(self, frame_id,conf_thresh = 0,det_class = 0):
dets = self.dets[frame_id]
dets = [det for det in dets if det.det_class == det_class and det.conf >= conf_thresh]
return dets
def cmc(self,x,y,w,h,frame_id):
u,v = self.mapper.xy2uv(x,y)
affine = self.gmc.get_affine(frame_id)
M = affine[:,:2]
T = np.zeros((2,1))
T[0,0] = affine[0,2]
T[1,0] = affine[1,2]
p_center = np.array([[u],[v-h/2]])
p_wh = np.array([[w],[h]])
p_center = np.dot(M,p_center) + T
p_wh = np.dot(M,p_wh)
u = p_center[0,0]
v = p_center[1,0]+p_wh[1,0]/2
xy,_ = self.mapper.uv2xy(np.array([[u],[v]]),np.eye(2))
return xy[0,0],xy[1,0]
# Path: detector/detector.py
class Detection:
def __init__(self, id, bb_left = 0, bb_top = 0, bb_width = 0, bb_height = 0, conf = 0, det_class = 0):
self.id = id
self.bb_left = bb_left
self.bb_top = bb_top
self.bb_width = bb_width
self.bb_height = bb_height
self.conf = conf
self.det_class = det_class
self.track_id = 0
self.y = np.zeros((2, 1))
self.R = np.eye(4)
def get_box(self):
return [self.bb_left, self.bb_top, self.bb_width, self.bb_height]
def __str__(self):
return 'd{}, bb_box:[{},{},{},{}], conf={:.2f}, class{}, uv:[{:.0f},{:.0f}], mapped to:[{:.1f},{:.1f}]'.format(
self.id, self.bb_left, self.bb_top, self.bb_width, self.bb_height, self.conf, self.det_class,
self.bb_left+self.bb_width/2,self.bb_top+self.bb_height,self.y[0,0],self.y[1,0])
def __repr__(self):
return self.__str__()
# Path: tracker/ucmc.py
class UCMCTrack(object):
def __init__(self,a1,a2,wx, wy,vmax, max_age, fps, dataset, high_score, use_cmc,detector = None):
self.wx = wx
self.wy = wy
self.vmax = vmax
self.dataset = dataset
self.high_score = high_score
self.max_age = max_age
self.a1 = a1
self.a2 = a2
self.dt = 1.0/fps
self.use_cmc = use_cmc
self.trackers = []
self.confirmed_idx = []
self.coasted_idx = []
self.tentative_idx = []
self.detector = detector
def update(self, dets,frame_id):
self.data_association(dets,frame_id)
self.associate_tentative(dets)
self.initial_tentative(dets)
self.delete_old_trackers()
self.update_status(dets)
def data_association(self, dets,frame_id):
# Separate detections into high score and low score
detidx_high = []
detidx_low = []
for i in range(len(dets)):
if dets[i].conf >= self.high_score:
detidx_high.append(i)
else:
detidx_low.append(i)
        # Predict new locations of tracks
for track in self.trackers:
track.predict()
if self.use_cmc:
x,y = self.detector.cmc(track.kf.x[0,0],track.kf.x[2,0],track.w,track.h,frame_id)
track.kf.x[0,0] = x
track.kf.x[2,0] = y
trackidx_remain = []
self.detidx_remain = []
# Associate high score detections with tracks
trackidx = self.confirmed_idx + self.coasted_idx
num_det = len(detidx_high)
num_trk = len(trackidx)
for trk in self.trackers:
trk.detidx = -1
if num_det*num_trk > 0:
cost_matrix = np.zeros((num_det, num_trk))
for i in range(num_det):
det_idx = detidx_high[i]
for j in range(num_trk):
trk_idx = trackidx[j]
cost_matrix[i,j] = self.trackers[trk_idx].distance(dets[det_idx].y, dets[det_idx].R)
matched_indices,unmatched_a,unmatched_b = linear_assignment(cost_matrix, self.a1)
for i in unmatched_a:
self.detidx_remain.append(detidx_high[i])
for i in unmatched_b:
trackidx_remain.append(trackidx[i])
for i,j in matched_indices:
det_idx = detidx_high[i]
trk_idx = trackidx[j]
self.trackers[trk_idx].update(dets[det_idx].y, dets[det_idx].R)
self.trackers[trk_idx].death_count = 0
self.trackers[trk_idx].detidx = det_idx
self.trackers[trk_idx].status = TrackStatus.Confirmed
dets[det_idx].track_id = self.trackers[trk_idx].id
else:
self.detidx_remain = detidx_high
trackidx_remain = trackidx
# Associate low score detections with remain tracks
num_det = len(detidx_low)
num_trk = len(trackidx_remain)
if num_det*num_trk > 0:
cost_matrix = np.zeros((num_det, num_trk))
for i in range(num_det):
det_idx = detidx_low[i]
for j in range(num_trk):
trk_idx = trackidx_remain[j]
cost_matrix[i,j] = self.trackers[trk_idx].distance(dets[det_idx].y, dets[det_idx].R)
matched_indices,unmatched_a,unmatched_b = linear_assignment(cost_matrix,self.a2)
for i in unmatched_b:
trk_idx = trackidx_remain[i]
self.trackers[trk_idx].status = TrackStatus.Coasted
# self.trackers[trk_idx].death_count += 1
self.trackers[trk_idx].detidx = -1
for i,j in matched_indices:
det_idx = detidx_low[i]
trk_idx = trackidx_remain[j]
self.trackers[trk_idx].update(dets[det_idx].y, dets[det_idx].R)
self.trackers[trk_idx].death_count = 0
self.trackers[trk_idx].detidx = det_idx
self.trackers[trk_idx].status = TrackStatus.Confirmed
dets[det_idx].track_id = self.trackers[trk_idx].id
def associate_tentative(self, dets):
num_det = len(self.detidx_remain)
num_trk = len(self.tentative_idx)
cost_matrix = np.zeros((num_det, num_trk))
for i in range(num_det):
det_idx = self.detidx_remain[i]
for j in range(num_trk):
trk_idx = self.tentative_idx[j]
cost_matrix[i,j] = self.trackers[trk_idx].distance(dets[det_idx].y, dets[det_idx].R)
matched_indices,unmatched_a,unmatched_b = linear_assignment(cost_matrix,self.a1)
for i,j in matched_indices:
det_idx = self.detidx_remain[i]
trk_idx = self.tentative_idx[j]
self.trackers[trk_idx].update(dets[det_idx].y, dets[det_idx].R)
self.trackers[trk_idx].death_count = 0
self.trackers[trk_idx].birth_count += 1
self.trackers[trk_idx].detidx = det_idx
dets[det_idx].track_id = self.trackers[trk_idx].id
if self.trackers[trk_idx].birth_count >= 2:
self.trackers[trk_idx].birth_count = 0
self.trackers[trk_idx].status = TrackStatus.Confirmed
for i in unmatched_b:
trk_idx = self.tentative_idx[i]
# self.trackers[trk_idx].death_count += 1
self.trackers[trk_idx].detidx = -1
unmatched_detidx = []
for i in unmatched_a:
unmatched_detidx.append(self.detidx_remain[i])
self.detidx_remain = unmatched_detidx
def initial_tentative(self,dets):
for i in self.detidx_remain:
self.trackers.append(KalmanTracker(dets[i].y,dets[i].R,self.wx,self.wy,self.vmax, dets[i].bb_width,dets[i].bb_height,self.dt))
self.trackers[-1].status = TrackStatus.Tentative
self.trackers[-1].detidx = i
self.detidx_remain = []
def delete_old_trackers(self):
i = len(self.trackers)
for trk in reversed(self.trackers):
trk.death_count += 1
i -= 1
if ( trk.status == TrackStatus.Coasted and trk.death_count >= self.max_age) or ( trk.status == TrackStatus.Tentative and trk.death_count >= 2):
self.trackers.pop(i)
def update_status(self,dets):
self.confirmed_idx = []
self.coasted_idx = []
self.tentative_idx = []
for i in range(len(self.trackers)):
detidx = self.trackers[i].detidx
if detidx >= 0 and detidx < len(dets):
self.trackers[i].h = dets[detidx].bb_height
self.trackers[i].w = dets[detidx].bb_width
if self.trackers[i].status == TrackStatus.Confirmed:
self.confirmed_idx.append(i)
elif self.trackers[i].status == TrackStatus.Coasted:
self.coasted_idx.append(i)
elif self.trackers[i].status == TrackStatus.Tentative:
self.tentative_idx.append(i)
# Path: tracker/kalman.py
class TrackStatus(Enum):
Tentative = 0
Confirmed = 1
Coasted = 2
# Path: eval/interpolation.py
def interpolate(txt_path, save_path, n_min=3, n_dti=20, is_enable = True):
mkdir_if_missing(txt_path)
mkdir_if_missing(save_path)
if is_enable:
dti(txt_path, save_path, n_min, n_dti)
else:
        # Copy the files under txt_path to save_path
for file in os.listdir(txt_path):
if file.endswith(".txt"):
shutil.copy(os.path.join(txt_path,file),os.path.join(save_path,file))
# Path: util/run_ucmc.py
from detector.detector import Detector, Detection
from tracker.ucmc import UCMCTrack
from tracker.kalman import TrackStatus
from eval.interpolation import interpolate
import os,time
import argparse
class Tracklet():
def __init__(self,frame_id,box):
self.is_active = False
self.boxes = dict()
self.boxes[frame_id] = box
def add_box(self, frame_id, box):
self.boxes[frame_id] = box
def activate(self):
self.is_active = True
def make_args():
parser = argparse.ArgumentParser(description='Process some arguments.')
parser.add_argument('--seq', type=str, default = "MOT17-02", help='seq name')
parser.add_argument('--fps', type=float, default=30.0, help='fps')
parser.add_argument('--wx', type=float, default=0.1, help='wx')
parser.add_argument('--wy', type=float, default=0.1, help='wy')
parser.add_argument('--vmax', type=float, default=0.5, help='vmax')
parser.add_argument('--a', type=float, default=10.0, help='assignment threshold')
parser.add_argument('--cdt', type=float, default=30.0, help='coasted deletion time')
parser.add_argument('--high_score', type=float, default=0.6, help='high score threshold')
parser.add_argument('--conf_thresh', type=float, default=0.5, help='detection confidence threshold')
parser.add_argument("--cmc", action="store_true", help="use cmc or not.")
parser.add_argument("--hp", action="store_true", help="use head padding or not.")
args = parser.parse_args()
return args
def run_ucmc(args, det_path = "det_results/mot17/yolox_x_ablation",
cam_path = "cam_para/mot17",
gmc_path = "gmc/mot17",
out_path = "output/mot17",
exp_name = "val",
dataset = "MOT17"):
seq_name = args.seq
eval_path = os.path.join(out_path,exp_name)
orig_save_path = os.path.join(eval_path,seq_name)
if not os.path.exists(orig_save_path):
os.makedirs(orig_save_path)
if dataset == "MOT17":
det_file = os.path.join(det_path, f"{seq_name}-SDP.txt")
cam_para = os.path.join(cam_path, f"{seq_name}-SDP.txt")
result_file = os.path.join(orig_save_path,f"{seq_name}-SDP.txt")
elif dataset == "MOT20":
det_file = os.path.join(det_path, f"{seq_name}.txt")
cam_para = os.path.join(cam_path, f"{seq_name}.txt")
result_file = os.path.join(orig_save_path,f"{seq_name}.txt")
gmc_file = os.path.join(gmc_path, f"GMC-{seq_name}.txt")
print(det_file)
print(cam_para)
detector = Detector()
detector.load(cam_para, det_file,gmc_file)
print(f"seq_length = {detector.seq_length}")
a1 = args.a
a2 = args.a
high_score = args.high_score
conf_thresh = args.conf_thresh
fps = args.fps
cdt = args.cdt
wx = args.wx
wy = args.wy
vmax = args.vmax
tracker = UCMCTrack(a1, a2, wx,wy,vmax, cdt, fps, dataset, high_score,args.cmc,detector)
t1 = time.time()
tracklets = dict()
with open(result_file,"w") as f:
for frame_id in range(1, detector.seq_length + 1):
dets = detector.get_dets(frame_id, conf_thresh)
tracker.update(dets,frame_id)
if args.hp:
for i in tracker.tentative_idx:
t = tracker.trackers[i]
if(t.detidx < 0 or t.detidx >= len(dets)):
continue
if t.id not in tracklets:
tracklets[t.id] = Tracklet(frame_id, dets[t.detidx].get_box())
else:
tracklets[t.id].add_box(frame_id, dets[t.detidx].get_box())
for i in tracker.confirmed_idx:
t = tracker.trackers[i]
if(t.detidx < 0 or t.detidx >= len(dets)):
continue
if t.id not in tracklets:
tracklets[t.id] = Tracklet(frame_id, dets[t.detidx].get_box())
else:
tracklets[t.id].add_box(frame_id, dets[t.detidx].get_box())
tracklets[t.id].activate()
| else: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ingra14m/Specular-Gaussians
# Path: utils/rigid_utils.py
def exp_se3(S: torch.Tensor, theta: float) -> torch.Tensor:
"""Exponential map from Lie algebra so3 to Lie group SO3.
Modern Robotics Eqn 3.88.
Args:
S: (6,) A screw axis of motion.
theta: Magnitude of motion.
Returns:
a_X_b: (4, 4) The homogeneous transformation matrix attained by integrating
motion of magnitude theta about S for one second.
"""
w, v = torch.split(S, 3, dim=-1)
W = skew(w)
R = exp_so3(w, theta)
identity = torch.eye(3).unsqueeze(0).repeat(W.shape[0], 1, 1).to(W.device)
W_sqr = torch.bmm(W, W)
theta = theta.view(-1, 1, 1)
p = torch.bmm((theta * identity + (1.0 - torch.cos(theta)) * W + (theta - torch.sin(theta)) * W_sqr),
v.unsqueeze(-1))
return rp_to_se3(R, p)
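# --- Illustrative usage sketch (added for clarity; not part of the original repository). ---
# The batched shapes are an assumption inferred from the bmm and view(-1, 1, 1) calls above;
# skew, exp_so3 and rp_to_se3 are expected to come from the same utils/rigid_utils.py module.
import torch
from utils.rigid_utils import exp_se3

S = torch.randn(8, 6)     # 8 screw axes, each the concatenation (w | v)
theta = torch.rand(8, 1)  # motion magnitudes
T = exp_se3(S, theta)
print(T.shape)            # expected: (8, 4, 4) homogeneous transforms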
# Path: utils/quaternion_utils.py
def init_predefined_omega(n_theta, n_phi):
theta_list = torch.linspace(0, np.pi, n_theta)
phi_list = torch.linspace(0, np.pi * 2, n_phi)
out_omega = []
out_omega_lambda = []
out_omega_mu = []
for i in range(n_theta):
theta = theta_list[i].view(1, 1)
for j in range(n_phi):
phi = phi_list[j].view(1, 1)
omega = spherical2cartesian(theta, phi)
omega = torch.stack(omega, dim=-1).view(1, 3)
omega_lambda = spherical2cartesian(theta + np.pi / 2, phi)
omega_lambda = torch.stack(omega_lambda, dim=-1).view(1, 3)
p = cartesian2quaternion(omega_lambda)
q = build_q(omega, torch.tensor(np.pi / 2).view(1, 1))
omega_mu = quaternion_rotate(p, q)[..., 1:]
out_omega.append(omega)
out_omega_lambda.append(omega_lambda)
out_omega_mu.append(omega_mu)
out_omega = torch.stack(out_omega, dim=0)
out_omega_lambda = torch.stack(out_omega_lambda, dim=0)
out_omega_mu = torch.stack(out_omega_mu, dim=0)
return out_omega, out_omega_lambda, out_omega_mu
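# --- Illustrative usage sketch (added for clarity; not part of the original repository). ---
# Assumes the helpers used above (spherical2cartesian, cartesian2quaternion, build_q,
# quaternion_rotate) are available in the same utils/quaternion_utils.py module.
from utils.quaternion_utils import init_predefined_omega

omega, omega_lambda, omega_mu = init_predefined_omega(n_theta=4, n_phi=8)
# One row per (theta, phi) pair, so the expected shape of each tensor is (4 * 8, 1, 3).
print(omega.shape, omega_lambda.shape, omega_mu.shape)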
# Path: utils/general_utils.py
def linear_to_srgb(linear):
if isinstance(linear, torch.Tensor):
"""Assumes `linear` is in [0, 1], see https://en.wikipedia.org/wiki/SRGB."""
eps = torch.finfo(torch.float32).eps
srgb0 = 323 / 25 * linear
srgb1 = (211 * torch.clamp(linear, min=eps) ** (5 / 12) - 11) / 200
return torch.where(linear <= 0.0031308, srgb0, srgb1)
elif isinstance(linear, np.ndarray):
eps = np.finfo(np.float32).eps
srgb0 = 323 / 25 * linear
srgb1 = (211 * np.maximum(eps, linear) ** (5 / 12) - 11) / 200
return np.where(linear <= 0.0031308, srgb0, srgb1)
else:
raise NotImplementedError
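# --- Illustrative usage sketch (added for clarity; not part of the original repository). ---
# The sample values are arbitrary; both branches apply the same piecewise sRGB transfer curve.
import numpy as np
import torch
from utils.general_utils import linear_to_srgb

print(linear_to_srgb(torch.tensor([0.0, 0.001, 0.5, 1.0])))
print(linear_to_srgb(np.array([0.0, 0.001, 0.5, 1.0], dtype=np.float32)))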
# Path: utils/ref_utils.py
def generate_ide_fn(deg_view):
"""Generate integrated directional encoding (IDE) function.
This function returns a function that computes the integrated directional
encoding from Equations 6-8 of arxiv.org/abs/2112.03907.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating integrated directional encoding.
Raises:
ValueError: if deg_view is larger than 5.
"""
if deg_view > 5:
raise ValueError('Only deg_view of at most 5 is numerically stable.')
ml_array = get_ml_array(deg_view)
l_max = 2 ** (deg_view - 1)
# Create a matrix corresponding to ml_array holding all coefficients, which,
# when multiplied (from the right) by the z coordinate Vandermonde matrix,
# results in the z component of the encoding.
mat = np.zeros((l_max + 1, ml_array.shape[1]))
for i, (m, l) in enumerate(ml_array.T):
for k in range(l - m + 1):
mat[k, i] = sph_harm_coeff(l, m, k)
mat = torch.from_numpy(mat.astype(np.float32)).cuda()
ml_array = torch.from_numpy(ml_array.astype(np.float32)).cuda()
def integrated_dir_enc_fn(xyz, kappa_inv):
"""Function returning integrated directional encoding (IDE).
Args:
xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at.
kappa_inv: [..., 1] reciprocal of the concentration parameter of the von
Mises-Fisher distribution.
Returns:
An array with the resulting IDE.
"""
x = xyz[..., 0:1]
y = xyz[..., 1:2]
z = xyz[..., 2:3]
# Compute z Vandermonde matrix.
vmz = torch.concat([z ** i for i in range(mat.shape[0])], dim=-1)
# Compute x+iy Vandermonde matrix.
vmxy = torch.concat([(x + 1j * y) ** m for m in ml_array[0, :]], dim=-1)
# Get spherical harmonics.
sph_harms = vmxy * torch.matmul(vmz, mat)
# Apply attenuation function using the von Mises-Fisher distribution
# concentration parameter, kappa.
sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1)
ide = sph_harms * torch.exp(-sigma * kappa_inv)
# Split into real and imaginary parts and return
return torch.concat([torch.real(ide), torch.imag(ide)], dim=-1)
return integrated_dir_enc_fn
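# --- Illustrative usage sketch (added for clarity; not part of the original repository). ---
# generate_ide_fn moves its coefficient tables to CUDA, so the inputs are created on CUDA
# too; deg_view=2 and the tensor sizes are arbitrary choices.
import torch
from utils.ref_utils import generate_ide_fn

ide_fn = generate_ide_fn(deg_view=2)
dirs = torch.nn.functional.normalize(torch.randn(1024, 3, device="cuda"), dim=-1)
kappa_inv = torch.full((1024, 1), 0.1, device="cuda")  # larger values attenuate high frequencies more
encoding = ide_fn(dirs, kappa_inv)
print(encoding.shape)  # (1024, 2 * number of (l, m) pairs)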
# Path: utils/spec_utils.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from utils.rigid_utils import exp_se3
from utils.quaternion_utils import init_predefined_omega
from utils.general_utils import linear_to_srgb
from utils.ref_utils import generate_ide_fn
return reflection
class ASGRender(torch.nn.Module):
def __init__(self, inChanel, viewpe=6, feape=6, featureC=128):
super(ASGRender, self).__init__()
self.num_theta = 4
self.num_phi = 8
self.ch_normal_dot_viewdir = 1
self.in_mlpC = 2 * viewpe * 3 + 3 + self.num_theta * self.num_phi * 2
self.viewpe = viewpe
self.ree_function = RenderingEquationEncoding(self.num_theta, self.num_phi, 'cuda')
layer1 = torch.nn.Linear(self.in_mlpC, featureC)
layer2 = torch.nn.Linear(featureC, featureC)
layer3 = torch.nn.Linear(featureC, 3)
self.mlp = torch.nn.Sequential(layer1, torch.nn.ReLU(inplace=True), layer2, torch.nn.ReLU(inplace=True), layer3)
torch.nn.init.constant_(self.mlp[-1].bias, 0)
def reflect(self, viewdir, normal):
out = 2 * (viewdir * normal).sum(dim=-1, keepdim=True) * normal - viewdir
return out
def safe_normalize(self, x, eps=1e-8):
return x / (torch.norm(x, dim=-1, keepdim=True) + eps)
def forward(self, pts, viewdirs, features):
asg_params = features.view(-1, self.num_theta, self.num_phi, 4) # [N, 8, 16, 4]
a, la, mu = torch.split(asg_params, [2, 1, 1], dim=-1)
color_feature = self.ree_function(viewdirs, a, la, mu)
# color_feature = color_feature.view(color_feature.size(0), -1, 3)
color_feature = color_feature.view(color_feature.size(0), -1) # [N, 256]
indata = [color_feature]
if self.viewpe > -1:
indata += [viewdirs]
if self.viewpe > 0:
indata += [positional_encoding(viewdirs, self.viewpe)]
mlp_in = torch.cat(indata, dim=-1)
rgb = self.mlp(mlp_in)
# rgb = torch.sum(color_feature, dim=1)
# rgb = torch.sigmoid(rgb)
return rgb
class IdentityActivation(nn.Module):
def forward(self, x): return x
class ExpActivation(nn.Module):
def __init__(self, max_light=5.0):
super().__init__()
self.max_light = max_light
def forward(self, x):
return torch.exp(torch.clamp(x, max=self.max_light))
def make_predictor(feats_dim: object, output_dim: object, weight_norm: object = True, activation='sigmoid',
exp_max=0.0) -> object:
if activation == 'sigmoid':
activation = nn.Sigmoid()
elif activation == 'exp':
activation = ExpActivation(max_light=exp_max)
elif activation == 'none':
activation = IdentityActivation()
elif activation == 'relu':
activation = nn.ReLU()
else:
raise NotImplementedError
run_dim = 256
if weight_norm:
module = nn.Sequential(
nn.utils.weight_norm(nn.Linear(feats_dim, run_dim)),
nn.ReLU(),
nn.utils.weight_norm(nn.Linear(run_dim, run_dim)),
nn.ReLU(),
nn.utils.weight_norm(nn.Linear(run_dim, run_dim)),
nn.ReLU(),
nn.utils.weight_norm(nn.Linear(run_dim, output_dim)),
activation,
)
else:
module = nn.Sequential(
nn.Linear(feats_dim, run_dim),
nn.ReLU(),
nn.Linear(run_dim, run_dim),
nn.ReLU(),
nn.Linear(run_dim, run_dim),
nn.ReLU(),
nn.Linear(run_dim, output_dim),
activation,
)
return module
class AppShadingNetwork(nn.Module):
default_cfg = {
'human_light': False,
'sphere_direction': False,
'light_pos_freq': 8,
'inner_init': -0.95,
'roughness_init': 0.0,
'metallic_init': 0.0,
'light_exp_max': 0.0,
}
def __init__(self):
super().__init__()
self.cfg = {**self.default_cfg}
feats_dim = 256
FG_LUT = torch.from_numpy(np.fromfile('assets/bsdf_256_256.bin', dtype=np.float32).reshape(1, 256, 256, 2))
| self.register_buffer('FG_LUT', FG_LUT) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Artiprocher/DiffSynth-Studio
# Path: diffsynth/models/attention.py
class Attention(torch.nn.Module):
def __init__(self, q_dim, num_heads, head_dim, kv_dim=None, bias_q=False, bias_kv=False, bias_out=False):
super().__init__()
dim_inner = head_dim * num_heads
kv_dim = kv_dim if kv_dim is not None else q_dim
self.num_heads = num_heads
self.head_dim = head_dim
self.to_q = torch.nn.Linear(q_dim, dim_inner, bias=bias_q)
self.to_k = torch.nn.Linear(kv_dim, dim_inner, bias=bias_kv)
self.to_v = torch.nn.Linear(kv_dim, dim_inner, bias=bias_kv)
self.to_out = torch.nn.Linear(dim_inner, q_dim, bias=bias_out)
def torch_forward(self, hidden_states, encoder_hidden_states=None, attn_mask=None):
if encoder_hidden_states is None:
encoder_hidden_states = hidden_states
batch_size = encoder_hidden_states.shape[0]
q = self.to_q(hidden_states)
k = self.to_k(encoder_hidden_states)
v = self.to_v(encoder_hidden_states)
q = q.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
k = k.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
v = v.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
hidden_states = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask)
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_dim)
hidden_states = hidden_states.to(q.dtype)
hidden_states = self.to_out(hidden_states)
return hidden_states
def xformers_forward(self, hidden_states, encoder_hidden_states=None, attn_mask=None):
if encoder_hidden_states is None:
encoder_hidden_states = hidden_states
q = self.to_q(hidden_states)
k = self.to_k(encoder_hidden_states)
v = self.to_v(encoder_hidden_states)
q = rearrange(q, "b f (n d) -> (b n) f d", n=self.num_heads)
k = rearrange(k, "b f (n d) -> (b n) f d", n=self.num_heads)
v = rearrange(v, "b f (n d) -> (b n) f d", n=self.num_heads)
if attn_mask is not None:
hidden_states = low_version_attention(q, k, v, attn_bias=attn_mask)
else:
import xformers.ops as xops
hidden_states = xops.memory_efficient_attention(q, k, v)
hidden_states = rearrange(hidden_states, "(b n) f d -> b f (n d)", n=self.num_heads)
hidden_states = hidden_states.to(q.dtype)
hidden_states = self.to_out(hidden_states)
return hidden_states
def forward(self, hidden_states, encoder_hidden_states=None, attn_mask=None):
return self.torch_forward(hidden_states, encoder_hidden_states=encoder_hidden_states, attn_mask=attn_mask)
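# --- Illustrative usage sketch (added for clarity; not part of the original repository). ---
# The dimensions are arbitrary; the second call shows cross-attention against a context
# of a different feature width via kv_dim.
import torch
from diffsynth.models.attention import Attention

self_attn = Attention(q_dim=320, num_heads=8, head_dim=40)
x = torch.randn(2, 64, 320)                      # (batch, tokens, channels)
print(self_attn(x).shape)                        # (2, 64, 320)

cross_attn = Attention(q_dim=320, num_heads=8, head_dim=40, kv_dim=768)
context = torch.randn(2, 77, 768)                # e.g. text encoder output
print(cross_attn(x, encoder_hidden_states=context).shape)  # (2, 64, 320)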
# Path: diffsynth/models/sd_unet.py
class ResnetBlock(torch.nn.Module):
def __init__(self, in_channels, out_channels, temb_channels=None, groups=32, eps=1e-5):
super().__init__()
self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
if temb_channels is not None:
self.time_emb_proj = torch.nn.Linear(temb_channels, out_channels)
self.norm2 = torch.nn.GroupNorm(num_groups=groups, num_channels=out_channels, eps=eps, affine=True)
self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.nonlinearity = torch.nn.SiLU()
self.conv_shortcut = None
if in_channels != out_channels:
self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=True)
def forward(self, hidden_states, time_emb, text_emb, res_stack, **kwargs):
x = hidden_states
x = self.norm1(x)
x = self.nonlinearity(x)
x = self.conv1(x)
if time_emb is not None:
emb = self.nonlinearity(time_emb)
emb = self.time_emb_proj(emb)[:, :, None, None]
x = x + emb
x = self.norm2(x)
x = self.nonlinearity(x)
x = self.conv2(x)
if self.conv_shortcut is not None:
hidden_states = self.conv_shortcut(hidden_states)
hidden_states = hidden_states + x
return hidden_states, time_emb, text_emb, res_stack
# Path: diffsynth/models/sd_unet.py
class UpSampler(torch.nn.Module):
def __init__(self, channels):
super().__init__()
self.conv = torch.nn.Conv2d(channels, channels, 3, padding=1)
def forward(self, hidden_states, time_emb, text_emb, res_stack, **kwargs):
hidden_states = torch.nn.functional.interpolate(hidden_states, scale_factor=2.0, mode="nearest")
hidden_states = self.conv(hidden_states)
return hidden_states, time_emb, text_emb, res_stack
# Path: diffsynth/models/tiler.py
class TileWorker:
def __init__(self):
pass
def mask(self, height, width, border_width):
# Create a mask with shape (height, width).
# The centre area is filled with 1, and the border line is filled with values in range (0, 1].
x = torch.arange(height).repeat(width, 1).T
y = torch.arange(width).repeat(height, 1)
mask = torch.stack([x + 1, height - x, y + 1, width - y]).min(dim=0).values
mask = (mask / border_width).clip(0, 1)
return mask
def tile(self, model_input, tile_size, tile_stride, tile_device, tile_dtype):
# Convert a tensor (b, c, h, w) to (b, c, tile_size, tile_size, tile_num)
batch_size, channel, _, _ = model_input.shape
model_input = model_input.to(device=tile_device, dtype=tile_dtype)
unfold_operator = torch.nn.Unfold(
kernel_size=(tile_size, tile_size),
stride=(tile_stride, tile_stride)
)
model_input = unfold_operator(model_input)
model_input = model_input.view((batch_size, channel, tile_size, tile_size, -1))
return model_input
def tiled_inference(self, forward_fn, model_input, tile_batch_size, inference_device, inference_dtype, tile_device, tile_dtype):
# Call y=forward_fn(x) for each tile
tile_num = model_input.shape[-1]
model_output_stack = []
for tile_id in range(0, tile_num, tile_batch_size):
# process input
tile_id_ = min(tile_id + tile_batch_size, tile_num)
x = model_input[:, :, :, :, tile_id: tile_id_]
x = x.to(device=inference_device, dtype=inference_dtype)
x = rearrange(x, "b c h w n -> (n b) c h w")
# process output
y = forward_fn(x)
y = rearrange(y, "(n b) c h w -> b c h w n", n=tile_id_-tile_id)
y = y.to(device=tile_device, dtype=tile_dtype)
model_output_stack.append(y)
model_output = torch.concat(model_output_stack, dim=-1)
return model_output
def io_scale(self, model_output, tile_size):
        # Determine the size modification happened in forward_fn
# We only consider the same scale on height and width.
io_scale = model_output.shape[2] / tile_size
return io_scale
def untile(self, model_output, height, width, tile_size, tile_stride, border_width, tile_device, tile_dtype):
        # The inverse operation of tile
mask = self.mask(tile_size, tile_size, border_width)
mask = mask.to(device=tile_device, dtype=tile_dtype)
mask = rearrange(mask, "h w -> 1 1 h w 1")
model_output = model_output * mask
fold_operator = torch.nn.Fold(
output_size=(height, width),
kernel_size=(tile_size, tile_size),
stride=(tile_stride, tile_stride)
)
mask = repeat(mask[0, 0, :, :, 0], "h w -> 1 (h w) n", n=model_output.shape[-1])
model_output = rearrange(model_output, "b c h w n -> b (c h w) n")
model_output = fold_operator(model_output) / fold_operator(mask)
return model_output
def tiled_forward(self, forward_fn, model_input, tile_size, tile_stride, tile_batch_size=1, tile_device="cpu", tile_dtype=torch.float32, border_width=None):
# Prepare
inference_device, inference_dtype = model_input.device, model_input.dtype
height, width = model_input.shape[2], model_input.shape[3]
border_width = int(tile_stride*0.5) if border_width is None else border_width
# tile
model_input = self.tile(model_input, tile_size, tile_stride, tile_device, tile_dtype)
# inference
model_output = self.tiled_inference(forward_fn, model_input, tile_batch_size, inference_device, inference_dtype, tile_device, tile_dtype)
# resize
io_scale = self.io_scale(model_output, tile_size)
height, width = int(height*io_scale), int(width*io_scale)
tile_size, tile_stride = int(tile_size*io_scale), int(tile_stride*io_scale)
border_width = int(border_width*io_scale)
# untile
model_output = self.untile(model_output, height, width, tile_size, tile_stride, border_width, tile_device, tile_dtype)
# Done!
model_output = model_output.to(device=inference_device, dtype=inference_dtype)
return model_output
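# --- Illustrative usage sketch (added for clarity; not part of the original repository). ---
# An identity forward_fn keeps the example self-contained; real callers pass a model forward
# such as a VAE decoder. All sizes are arbitrary.
import torch
from diffsynth.models.tiler import TileWorker

latent = torch.randn(1, 4, 128, 128)
out = TileWorker().tiled_forward(
    forward_fn=lambda x: x,   # placeholder for a per-tile model call
    model_input=latent,
    tile_size=64,
    tile_stride=32,
)
print(out.shape)  # (1, 4, 128, 128): tiles are blended back together with the border mask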
# Path: diffsynth/models/sd_vae_decoder.py
import torch
from .attention import Attention
from .sd_unet import ResnetBlock, UpSampler
from .tiler import TileWorker
local_rename_dict = {
"post_quant_conv": "post_quant_conv",
"decoder.conv_in": "conv_in",
"decoder.mid_block.attentions.0.group_norm": "blocks.1.norm",
"decoder.mid_block.attentions.0.to_q": "blocks.1.transformer_blocks.0.to_q",
"decoder.mid_block.attentions.0.to_k": "blocks.1.transformer_blocks.0.to_k",
"decoder.mid_block.attentions.0.to_v": "blocks.1.transformer_blocks.0.to_v",
"decoder.mid_block.attentions.0.to_out.0": "blocks.1.transformer_blocks.0.to_out",
"decoder.mid_block.resnets.0.norm1": "blocks.0.norm1",
"decoder.mid_block.resnets.0.conv1": "blocks.0.conv1",
"decoder.mid_block.resnets.0.norm2": "blocks.0.norm2",
"decoder.mid_block.resnets.0.conv2": "blocks.0.conv2",
"decoder.mid_block.resnets.1.norm1": "blocks.2.norm1",
"decoder.mid_block.resnets.1.conv1": "blocks.2.conv1",
"decoder.mid_block.resnets.1.norm2": "blocks.2.norm2",
"decoder.mid_block.resnets.1.conv2": "blocks.2.conv2",
"decoder.conv_norm_out": "conv_norm_out",
"decoder.conv_out": "conv_out",
}
name_list = sorted([name for name in state_dict])
rename_dict = {}
block_id = {"ResnetBlock": 2, "DownSampler": 2, "UpSampler": 2}
last_block_type_with_id = {"ResnetBlock": "", "DownSampler": "", "UpSampler": ""}
for name in name_list:
names = name.split(".")
name_prefix = ".".join(names[:-1])
if name_prefix in local_rename_dict:
rename_dict[name] = local_rename_dict[name_prefix] + "." + names[-1]
elif name.startswith("decoder.up_blocks"):
block_type = {"resnets": "ResnetBlock", "downsamplers": "DownSampler", "upsamplers": "UpSampler"}[names[3]]
block_type_with_id = ".".join(names[:5])
if block_type_with_id != last_block_type_with_id[block_type]:
block_id[block_type] += 1
last_block_type_with_id[block_type] = block_type_with_id
while block_id[block_type] < len(block_types) and block_types[block_id[block_type]] != block_type:
block_id[block_type] += 1
block_type_with_id = ".".join(names[:5])
names = ["blocks", str(block_id[block_type])] + names[5:]
rename_dict[name] = ".".join(names)
# Convert state_dict
state_dict_ = {}
for name, param in state_dict.items():
if name in rename_dict:
state_dict_[rename_dict[name]] = param
return state_dict_
def from_civitai(self, state_dict):
rename_dict = {
"first_stage_model.decoder.conv_in.bias": "conv_in.bias",
"first_stage_model.decoder.conv_in.weight": "conv_in.weight",
"first_stage_model.decoder.conv_out.bias": "conv_out.bias",
"first_stage_model.decoder.conv_out.weight": "conv_out.weight",
"first_stage_model.decoder.mid.attn_1.k.bias": "blocks.1.transformer_blocks.0.to_k.bias",
"first_stage_model.decoder.mid.attn_1.k.weight": "blocks.1.transformer_blocks.0.to_k.weight",
"first_stage_model.decoder.mid.attn_1.norm.bias": "blocks.1.norm.bias",
"first_stage_model.decoder.mid.attn_1.norm.weight": "blocks.1.norm.weight",
"first_stage_model.decoder.mid.attn_1.proj_out.bias": "blocks.1.transformer_blocks.0.to_out.bias",
"first_stage_model.decoder.mid.attn_1.proj_out.weight": "blocks.1.transformer_blocks.0.to_out.weight",
"first_stage_model.decoder.mid.attn_1.q.bias": "blocks.1.transformer_blocks.0.to_q.bias",
"first_stage_model.decoder.mid.attn_1.q.weight": "blocks.1.transformer_blocks.0.to_q.weight",
"first_stage_model.decoder.mid.attn_1.v.bias": "blocks.1.transformer_blocks.0.to_v.bias",
"first_stage_model.decoder.mid.attn_1.v.weight": "blocks.1.transformer_blocks.0.to_v.weight",
"first_stage_model.decoder.mid.block_1.conv1.bias": "blocks.0.conv1.bias",
"first_stage_model.decoder.mid.block_1.conv1.weight": "blocks.0.conv1.weight",
"first_stage_model.decoder.mid.block_1.conv2.bias": "blocks.0.conv2.bias",
"first_stage_model.decoder.mid.block_1.conv2.weight": "blocks.0.conv2.weight",
"first_stage_model.decoder.mid.block_1.norm1.bias": "blocks.0.norm1.bias",
"first_stage_model.decoder.mid.block_1.norm1.weight": "blocks.0.norm1.weight",
"first_stage_model.decoder.mid.block_1.norm2.bias": "blocks.0.norm2.bias",
"first_stage_model.decoder.mid.block_1.norm2.weight": "blocks.0.norm2.weight",
"first_stage_model.decoder.mid.block_2.conv1.bias": "blocks.2.conv1.bias",
"first_stage_model.decoder.mid.block_2.conv1.weight": "blocks.2.conv1.weight",
"first_stage_model.decoder.mid.block_2.conv2.bias": "blocks.2.conv2.bias",
"first_stage_model.decoder.mid.block_2.conv2.weight": "blocks.2.conv2.weight",
"first_stage_model.decoder.mid.block_2.norm1.bias": "blocks.2.norm1.bias",
"first_stage_model.decoder.mid.block_2.norm1.weight": "blocks.2.norm1.weight",
"first_stage_model.decoder.mid.block_2.norm2.bias": "blocks.2.norm2.bias",
"first_stage_model.decoder.mid.block_2.norm2.weight": "blocks.2.norm2.weight",
"first_stage_model.decoder.norm_out.bias": "conv_norm_out.bias",
"first_stage_model.decoder.norm_out.weight": "conv_norm_out.weight",
"first_stage_model.decoder.up.0.block.0.conv1.bias": "blocks.15.conv1.bias",
"first_stage_model.decoder.up.0.block.0.conv1.weight": "blocks.15.conv1.weight",
"first_stage_model.decoder.up.0.block.0.conv2.bias": "blocks.15.conv2.bias",
"first_stage_model.decoder.up.0.block.0.conv2.weight": "blocks.15.conv2.weight",
"first_stage_model.decoder.up.0.block.0.nin_shortcut.bias": "blocks.15.conv_shortcut.bias",
"first_stage_model.decoder.up.0.block.0.nin_shortcut.weight": "blocks.15.conv_shortcut.weight",
"first_stage_model.decoder.up.0.block.0.norm1.bias": "blocks.15.norm1.bias",
"first_stage_model.decoder.up.0.block.0.norm1.weight": "blocks.15.norm1.weight",
"first_stage_model.decoder.up.0.block.0.norm2.bias": "blocks.15.norm2.bias",
"first_stage_model.decoder.up.0.block.0.norm2.weight": "blocks.15.norm2.weight",
"first_stage_model.decoder.up.0.block.1.conv1.bias": "blocks.16.conv1.bias",
"first_stage_model.decoder.up.0.block.1.conv1.weight": "blocks.16.conv1.weight",
"first_stage_model.decoder.up.0.block.1.conv2.bias": "blocks.16.conv2.bias",
"first_stage_model.decoder.up.0.block.1.conv2.weight": "blocks.16.conv2.weight",
"first_stage_model.decoder.up.0.block.1.norm1.bias": "blocks.16.norm1.bias",
"first_stage_model.decoder.up.0.block.1.norm1.weight": "blocks.16.norm1.weight",
"first_stage_model.decoder.up.0.block.1.norm2.bias": "blocks.16.norm2.bias",
"first_stage_model.decoder.up.0.block.1.norm2.weight": "blocks.16.norm2.weight",
"first_stage_model.decoder.up.0.block.2.conv1.bias": "blocks.17.conv1.bias",
"first_stage_model.decoder.up.0.block.2.conv1.weight": "blocks.17.conv1.weight",
"first_stage_model.decoder.up.0.block.2.conv2.bias": "blocks.17.conv2.bias",
"first_stage_model.decoder.up.0.block.2.conv2.weight": "blocks.17.conv2.weight",
"first_stage_model.decoder.up.0.block.2.norm1.bias": "blocks.17.norm1.bias",
"first_stage_model.decoder.up.0.block.2.norm1.weight": "blocks.17.norm1.weight",
"first_stage_model.decoder.up.0.block.2.norm2.bias": "blocks.17.norm2.bias",
"first_stage_model.decoder.up.0.block.2.norm2.weight": "blocks.17.norm2.weight",
"first_stage_model.decoder.up.1.block.0.conv1.bias": "blocks.11.conv1.bias",
"first_stage_model.decoder.up.1.block.0.conv1.weight": "blocks.11.conv1.weight",
"first_stage_model.decoder.up.1.block.0.conv2.bias": "blocks.11.conv2.bias",
"first_stage_model.decoder.up.1.block.0.conv2.weight": "blocks.11.conv2.weight",
"first_stage_model.decoder.up.1.block.0.nin_shortcut.bias": "blocks.11.conv_shortcut.bias",
"first_stage_model.decoder.up.1.block.0.nin_shortcut.weight": "blocks.11.conv_shortcut.weight",
"first_stage_model.decoder.up.1.block.0.norm1.bias": "blocks.11.norm1.bias",
"first_stage_model.decoder.up.1.block.0.norm1.weight": "blocks.11.norm1.weight",
"first_stage_model.decoder.up.1.block.0.norm2.bias": "blocks.11.norm2.bias",
"first_stage_model.decoder.up.1.block.0.norm2.weight": "blocks.11.norm2.weight",
"first_stage_model.decoder.up.1.block.1.conv1.bias": "blocks.12.conv1.bias",
"first_stage_model.decoder.up.1.block.1.conv1.weight": "blocks.12.conv1.weight",
"first_stage_model.decoder.up.1.block.1.conv2.bias": "blocks.12.conv2.bias",
| "first_stage_model.decoder.up.1.block.1.conv2.weight": "blocks.12.conv2.weight", |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: vikhyat/mixtral-inference
# Path: mixtral/rope.py
def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0) -> torch.Tensor:
freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
t = torch.arange(end, device=freqs.device) # type: ignore
freqs = torch.outer(t, freqs).float() # type: ignore
return torch.polar(torch.ones_like(freqs), freqs) # complex64
# Path: mixtral/rope.py
def apply_rotary_emb(
xq: torch.Tensor,
xk: torch.Tensor,
freqs_cis: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
freqs_cis = freqs_cis[:, None, :]
xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(2)
xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(2)
return xq_out.type_as(xq), xk_out.type_as(xk)
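# --- Illustrative usage sketch (added for clarity; not part of the original repository). ---
# Shapes mirror the calls in Attention.forward below: queries/keys are flattened over the
# batch and freqs_cis is indexed by absolute token positions. head_dim=128, the head counts
# and the two sequence lengths (5 and 7) are arbitrary.
import torch
from mixtral.rope import precompute_freqs_cis, apply_rotary_emb

head_dim, n_heads, n_kv_heads = 128, 32, 8
freqs_cis = precompute_freqs_cis(head_dim, 4096)            # (4096, head_dim // 2), complex64
positions = torch.cat([torch.arange(5), torch.arange(7)])   # two packed sequences
xq = torch.randn(12, n_heads, head_dim)
xk = torch.randn(12, n_kv_heads, head_dim)
xq_rot, xk_rot = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis[positions])
print(xq_rot.shape, xk_rot.shape)  # shapes unchanged, values rotated by position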
# Path: mixtral/cache.py
class CacheView:
def __init__(self, cache_k: torch.Tensor, cache_v: torch.Tensor, metadata: RotatingCacheInputMetadata, kv_seqlens: torch.Tensor):
self.cache_k = cache_k
self.cache_v = cache_v
self.kv_seqlens = kv_seqlens
self.metadata = metadata
def update(self, xk: torch.Tensor, xv: torch.Tensor):
"""
to_cache_mask masks the last [sliding_window] tokens in each sequence
"""
n_kv_heads, head_dim = self.cache_k.shape[-2:]
flat_cache_k = self.cache_k.view(-1, n_kv_heads, head_dim)
flat_cache_v = self.cache_v.view(-1, n_kv_heads, head_dim)
flat_cache_k.index_copy_(0, self.metadata.cache_positions, xk[self.metadata.to_cache_mask])
flat_cache_v.index_copy_(0, self.metadata.cache_positions, xv[self.metadata.to_cache_mask])
def interleave_kv(self, xk: torch.Tensor, xv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
This is a naive implementation and not optimized for speed.
"""
assert xk.ndim == xv.ndim == 3 # (B * T, H, D)
assert xk.shape == xv.shape
if all([s == 0 for s in self.metadata.seqlens]):
# No cache to interleave
return xk, xv
# Make it a list of [(T, H, D)]
xk = torch.split(xk, self.metadata.seqlens)
xv = torch.split(xv, self.metadata.seqlens)
assert len(xk) == len(self.kv_seqlens), f"Batch size is {len(self.kv_seqlens)}, got {len(xk)}"
# Order elements in cache by position by unrotating
cache_k = [unrotate(t, s) for t, s in zip(self.cache_k.to(xk[0].device), self.kv_seqlens)]
cache_v = [unrotate(t, s) for t, s in zip(self.cache_v.to(xv[0].device), self.kv_seqlens)]
interleaved_k = interleave_list(cache_k, xk)
interleaved_v = interleave_list(cache_v, xv)
return torch.cat(interleaved_k, dim=0), torch.cat(interleaved_v, dim=0)
@property
def sliding_window(self):
return self.cache_k.shape[1]
@property
def key(self) -> torch.Tensor:
return self.cache_k[:len(self.kv_seqlens)]
@property
def value(self) -> torch.Tensor:
return self.cache_v[:len(self.kv_seqlens)]
@property
def prefill(self):
return self.metadata.prefill
@property
def mask(self):
return self.metadata.mask
# Path: mixtral/cache.py
class RotatingBufferCache:
"""
This is an example that implements a less naive rotating buffer cache, allowing for variable length sequences.
Allocated cache is rectangular which is wasteful (see PagedAttention for better mechanisms)
"""
def __init__(self, n_layers: int, max_batch_size: int, sliding_window: int, n_kv_heads: int, head_dim: int):
self.sliding_window = sliding_window
self.n_kv_heads = n_kv_heads
self.head_dim = head_dim
self.cache_k = torch.empty((
n_layers,
max_batch_size,
sliding_window,
n_kv_heads,
head_dim
))
self.cache_v = torch.empty((
n_layers,
max_batch_size,
sliding_window,
n_kv_heads,
head_dim
))
# holds the valid length for each batch element in the cache
self.kv_seqlens = None
def get_view(self, layer_id: int, metadata: RotatingCacheInputMetadata) -> CacheView:
return CacheView(self.cache_k[layer_id], self.cache_v[layer_id], metadata, self.kv_seqlens)
def reset(self):
self.kv_seqlens = None
def init_kvseqlens(self, batch_size: int):
self.kv_seqlens = torch.zeros((batch_size,), device=self.device, dtype=torch.long)
@property
def device(self):
return self.cache_k.device
def to(self, device: torch.device, dtype: torch.dtype):
self.cache_k = self.cache_k.to(device=device, dtype=dtype)
self.cache_v = self.cache_v.to(device=device, dtype=dtype)
return self
def update_seqlens(self, seqlens: List[int]):
self.kv_seqlens += torch.tensor(seqlens, device=self.device, dtype=torch.long)
def get_input_metadata(self, seqlens: List[int]) -> RotatingCacheInputMetadata:
"""
        input = seqlens [5,7,2] // seqpos [0, 1, 3] // sliding_window 3
--> only cache last 3 tokens in each sequence
- to_cache_mask = [0 0 1 1 1 | 0 0 0 0 1 1 1 | 1 1]
- cached_elements = [3 | 3 | 2]
--> absolute positions are used for rope
- positions = [0 1 2 3 4 | 1 2 3 4 5 6 7 | 3 4]
--> cache positions are positions cache_masked, modulo sliding_window + batch_idx * sliding_window
- cache_positions = [2 0 1 | 5 3 4 | 6 7]
"""
if self.kv_seqlens is None:
self.init_kvseqlens(len(seqlens))
assert len(seqlens) == len(self.kv_seqlens), f"Batch size is {len(self.kv_seqlens)}, got {len(seqlens)}, did you forget to reset cache?"
seqpos = self.kv_seqlens.tolist()
assert len(seqlens) > 0, seqlens
masks = [
[x >= seqlen - self.sliding_window for x in range(seqlen)]
for seqlen in seqlens
]
to_cache_mask = torch.tensor(sum(masks, []), device=self.device, dtype=torch.bool)
cached_elements = torch.tensor([sum(mask) for mask in masks], device=self.device, dtype=torch.long)
positions = torch.cat([torch.arange(pos, pos + seqlen) for pos, seqlen in zip(seqpos, seqlens)]).to(device=self.device, dtype=torch.long)
batch_idx = torch.tensor(sum([[i]*seqlen for i, seqlen in enumerate(seqlens)], []), device=self.device, dtype=torch.long)
cache_positions = positions % self.sliding_window + batch_idx * self.sliding_window
first_prefill = seqpos[0] == 0
subsequent_prefill = any(seqlen > 1 for seqlen in seqlens)
if first_prefill:
assert all([pos == 0 for pos in seqpos]), (seqpos)
mask = BlockDiagonalCausalMask.from_seqlens(seqlens).make_local_attention(self.sliding_window)
elif subsequent_prefill:
mask = BlockDiagonalMask.from_seqlens(
q_seqlen=seqlens,
kv_seqlen=[s + cached_s.clamp(max=self.sliding_window).item() for (s, cached_s) in zip(seqlens, self.kv_seqlens)]
).make_local_attention_from_bottomright(self.sliding_window)
else:
mask = BlockDiagonalCausalWithOffsetPaddedKeysMask.from_seqlens(
q_seqlen=seqlens,
kv_padding=self.sliding_window,
kv_seqlen=(self.kv_seqlens + cached_elements).clamp(max=self.sliding_window).tolist()
)
return RotatingCacheInputMetadata(
positions=positions,
to_cache_mask=to_cache_mask,
cached_elements=cached_elements,
cache_positions=cache_positions[to_cache_mask],
prefill=first_prefill or subsequent_prefill,
mask=mask,
seqlens=seqlens,
)
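# --- Illustrative sketch of the index bookkeeping above (added for clarity; not part of the
# original repository). It recomputes the docstring example (seqlens [5, 7, 2], seqpos
# [0, 1, 3], sliding_window 3) without building the xformers attention masks.
import torch

seqlens, seqpos, sliding_window = [5, 7, 2], [0, 1, 3], 3
masks = [[x >= seqlen - sliding_window for x in range(seqlen)] for seqlen in seqlens]
to_cache_mask = torch.tensor(sum(masks, []), dtype=torch.bool)
positions = torch.cat([torch.arange(pos, pos + seqlen) for pos, seqlen in zip(seqpos, seqlens)])
batch_idx = torch.tensor(sum([[i] * seqlen for i, seqlen in enumerate(seqlens)], []))
cache_positions = positions % sliding_window + batch_idx * sliding_window
print(to_cache_mask.int().tolist())             # [0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1]
print(positions.tolist())                       # [0, 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 3, 4]
print(cache_positions[to_cache_mask].tolist())  # [2, 0, 1, 5, 3, 4, 6, 7]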
# Path: mixtral/model.py
import torch
import json
from torch import nn
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional
from mixtral.rope import precompute_freqs_cis, apply_rotary_emb
from mixtral.cache import CacheView, RotatingBufferCache
from xformers.ops.fmha import (
memory_efficient_attention,
)
self.args = args
self.n_heads: int = args.n_heads
self.n_kv_heads: int = args.n_kv_heads
self.repeats = self.n_heads // self.n_kv_heads
self.scale = self.args.head_dim**-0.5
self.wq = nn.Linear(
args.dim,
args.n_heads * args.head_dim,
bias=False,
device='meta',
dtype=dtype
)
self.wq.to_empty(device=device)
self.wk = nn.Linear(
args.dim,
args.n_kv_heads * args.head_dim,
bias=False,
device='meta',
dtype=dtype
)
self.wk.to_empty(device=device)
self.wv = nn.Linear(
args.dim,
args.n_kv_heads * args.head_dim,
bias=False,
device='meta',
dtype=dtype
)
self.wv.to_empty(device=device)
self.wo = nn.Linear(
args.n_heads * args.head_dim,
args.dim,
bias=False,
device='meta',
dtype=dtype
)
self.wo.to_empty(device=device)
def forward(
self, x: torch.Tensor,
freqs_cis: torch.Tensor,
cache: Optional[CacheView],
) -> torch.Tensor:
seqlen_sum, _ = x.shape
xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
xq = xq.view(seqlen_sum, self.n_heads, self.args.head_dim)
xk = xk.view(seqlen_sum, self.n_kv_heads, self.args.head_dim)
xv = xv.view(seqlen_sum, self.n_kv_heads, self.args.head_dim)
xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
xk = xk.to('cuda:0')
xv = xv.to('cuda:0')
if cache is None:
key, val = xk, xv
elif cache.prefill:
key, val = cache.interleave_kv(xk, xv)
cache.update(xk, xv)
else:
cache.update(xk, xv)
key, val = cache.key, cache.value
key = key.view(seqlen_sum * cache.sliding_window, self.n_kv_heads, self.args.head_dim)
val = val.view(seqlen_sum * cache.sliding_window, self.n_kv_heads, self.args.head_dim)
key, val = key.to(x.device), val.to(x.device)
# Repeat keys and values to match number of query heads
key, val = repeat_kv(key, val, self.repeats, dim=1)
# xformers requires (B=1, S, H, D)
xq, key, val = xq[None, ...], key[None, ...], val[None, ...]
output = memory_efficient_attention(xq, key, val, None if cache is None else cache.mask)
return self.wo(output.view_as(x))
class FeedForward(nn.Module):
def __init__(self, args: ModelArgs, device='cuda', dtype=torch.float16):
super().__init__()
self.gate = nn.Linear(args.dim, args.moe['num_experts'],
bias=False, device='meta', dtype=dtype)
self.gate.to_empty(device=device)
self.experts = torch.nn.ModuleList(
[FeedForwardExpert(args, device=device, dtype=dtype)
for _ in range(args.moe['num_experts'])]
)
def forward(self, x) -> torch.Tensor:
g = self.gate(x)
g = torch.softmax(g, dim=-1)
weights, expert_indices = torch.topk(g, 2, dim=-1)
weights /= weights.sum(dim=-1, keepdim=True)
result = torch.zeros_like(x)
for batch in range(x.shape[0]):
w_b, ei_b = weights[batch], expert_indices[batch]
for i, w in zip(ei_b, w_b):
result[batch] += w * self.experts[i](x[batch])
return result
class FeedForwardExpert(nn.Module):
def __init__(self, args: ModelArgs, device='cuda', dtype=torch.float16):
super().__init__()
self.w1 = nn.Linear(
args.dim,
args.hidden_dim,
bias=False,
device='meta',
dtype=dtype
)
self.w1.to_empty(device=device)
| self.w2 = nn.Linear( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: upfusion3d/upfusion
# Path: control_net/ldm/modules/diffusionmodules/model.py
class Encoder(nn.Module):
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla",
**ignore_kwargs):
super().__init__()
if use_linear_attn: attn_type = "linear"
self.ch = ch
self.temb_ch = 0
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
# downsampling
self.conv_in = torch.nn.Conv2d(in_channels,
self.ch,
kernel_size=3,
stride=1,
padding=1)
curr_res = resolution
in_ch_mult = (1,)+tuple(ch_mult)
self.in_ch_mult = in_ch_mult
self.down = nn.ModuleList()
for i_level in range(self.num_resolutions):
block = nn.ModuleList()
attn = nn.ModuleList()
block_in = ch*in_ch_mult[i_level]
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks):
block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(make_attn(block_in, attn_type=attn_type))
down = nn.Module()
down.block = block
down.attn = attn
if i_level != self.num_resolutions-1:
down.downsample = Downsample(block_in, resamp_with_conv)
curr_res = curr_res // 2
self.down.append(down)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
self.mid.block_2 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
# end
self.norm_out = Normalize(block_in)
self.conv_out = torch.nn.Conv2d(block_in,
2*z_channels if double_z else z_channels,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x):
# timestep embedding
temb = None
# downsampling
hs = [self.conv_in(x)]
for i_level in range(self.num_resolutions):
for i_block in range(self.num_res_blocks):
h = self.down[i_level].block[i_block](hs[-1], temb)
if len(self.down[i_level].attn) > 0:
h = self.down[i_level].attn[i_block](h)
hs.append(h)
if i_level != self.num_resolutions-1:
hs.append(self.down[i_level].downsample(hs[-1]))
# middle
h = hs[-1]
h = self.mid.block_1(h, temb)
h = self.mid.attn_1(h)
h = self.mid.block_2(h, temb)
# end
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
return h
# Path: control_net/ldm/modules/diffusionmodules/model.py
class Decoder(nn.Module):
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
attn_type="vanilla", **ignorekwargs):
super().__init__()
if use_linear_attn: attn_type = "linear"
self.ch = ch
self.temb_ch = 0
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
self.give_pre_end = give_pre_end
self.tanh_out = tanh_out
# compute in_ch_mult, block_in and curr_res at lowest res
in_ch_mult = (1,)+tuple(ch_mult)
block_in = ch*ch_mult[self.num_resolutions-1]
curr_res = resolution // 2**(self.num_resolutions-1)
self.z_shape = (1,z_channels,curr_res,curr_res)
print("Working with z of shape {} = {} dimensions.".format(
self.z_shape, np.prod(self.z_shape)))
# z to block_in
self.conv_in = torch.nn.Conv2d(z_channels,
block_in,
kernel_size=3,
stride=1,
padding=1)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
self.mid.block_2 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
# upsampling
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks+1):
block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(make_attn(block_in, attn_type=attn_type))
up = nn.Module()
up.block = block
up.attn = attn
if i_level != 0:
up.upsample = Upsample(block_in, resamp_with_conv)
curr_res = curr_res * 2
self.up.insert(0, up) # prepend to get consistent order
# end
self.norm_out = Normalize(block_in)
self.conv_out = torch.nn.Conv2d(block_in,
out_ch,
kernel_size=3,
stride=1,
padding=1)
def forward(self, z):
#assert z.shape[1:] == self.z_shape[1:]
self.last_z_shape = z.shape
# timestep embedding
temb = None
# z to block_in
h = self.conv_in(z)
# middle
h = self.mid.block_1(h, temb)
h = self.mid.attn_1(h)
h = self.mid.block_2(h, temb)
# upsampling
for i_level in reversed(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks+1):
h = self.up[i_level].block[i_block](h, temb)
if len(self.up[i_level].attn) > 0:
h = self.up[i_level].attn[i_block](h)
if i_level != 0:
h = self.up[i_level].upsample(h)
# end
if self.give_pre_end:
return h
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
if self.tanh_out:
h = torch.tanh(h)
return h
# Path: control_net/ldm/modules/distributions/distributions.py
class DiagonalGaussianDistribution(object):
def __init__(self, parameters, deterministic=False):
self.parameters = parameters
self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
self.deterministic = deterministic
self.std = torch.exp(0.5 * self.logvar)
self.var = torch.exp(self.logvar)
if self.deterministic:
self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)
def sample(self):
x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)
return x
def kl(self, other=None):
if self.deterministic:
return torch.Tensor([0.])
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean, 2)
+ self.var - 1.0 - self.logvar,
dim=[1, 2, 3])
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean, 2) / other.var
+ self.var / other.var - 1.0 - self.logvar + other.logvar,
dim=[1, 2, 3])
def nll(self, sample, dims=[1,2,3]):
if self.deterministic:
return torch.Tensor([0.])
logtwopi = np.log(2.0 * np.pi)
return 0.5 * torch.sum(
logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
dim=dims)
def mode(self):
return self.mean
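# --- Illustrative usage sketch (added for clarity; not part of the original repository). ---
# The moments tensor stacks mean and logvar on the channel axis, so 8 input channels give a
# 4-channel latent; all sizes are arbitrary.
import torch
from control_net.ldm.modules.distributions.distributions import DiagonalGaussianDistribution

moments = torch.randn(2, 8, 16, 16)   # (B, 2 * z_channels, H, W)
posterior = DiagonalGaussianDistribution(moments)
z = posterior.sample()                # (2, 4, 16, 16)
print(z.shape, posterior.kl().shape, posterior.nll(z).shape)  # latent, per-sample KL and NLL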
# Path: control_net/ldm/util.py
def instantiate_from_config(config):
if not "target" in config:
if config == '__is_first_stage__':
return None
elif config == "__is_unconditional__":
return None
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"])(**config.get("params", dict()))
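# --- Illustrative usage sketch (added for clarity; not part of the original repository). ---
# Assumes get_obj_from_str resolves a dotted import path; the torch.nn.Conv2d target and its
# params are arbitrary examples of a config entry.
from control_net.ldm.util import instantiate_from_config

conv_cfg = {
    "target": "torch.nn.Conv2d",
    "params": {"in_channels": 3, "out_channels": 8, "kernel_size": 3, "padding": 1},
}
layer = instantiate_from_config(conv_cfg)
print(type(layer))  # expected: <class 'torch.nn.modules.conv.Conv2d'>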
# Path: control_net/ldm/modules/ema.py
class LitEma(nn.Module):
def __init__(self, model, decay=0.9999, use_num_upates=True):
super().__init__()
if decay < 0.0 or decay > 1.0:
raise ValueError('Decay must be between 0 and 1')
self.m_name2s_name = {}
self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates
else torch.tensor(-1, dtype=torch.int))
for name, p in model.named_parameters():
if p.requires_grad:
# remove as '.'-character is not allowed in buffers
s_name = name.replace('.', '')
self.m_name2s_name.update({name: s_name})
self.register_buffer(s_name, p.clone().detach().data)
self.collected_params = []
def reset_num_updates(self):
del self.num_updates
self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))
def forward(self, model):
decay = self.decay
if self.num_updates >= 0:
self.num_updates += 1
decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))
one_minus_decay = 1.0 - decay
with torch.no_grad():
m_param = dict(model.named_parameters())
shadow_params = dict(self.named_buffers())
for key in m_param:
if m_param[key].requires_grad:
sname = self.m_name2s_name[key]
shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
else:
assert not key in self.m_name2s_name
def copy_to(self, model):
m_param = dict(model.named_parameters())
shadow_params = dict(self.named_buffers())
for key in m_param:
if m_param[key].requires_grad:
m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
else:
assert not key in self.m_name2s_name
def store(self, parameters):
"""
Save the current parameters for restoring later.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
temporarily stored.
"""
self.collected_params = [param.clone() for param in parameters]
def restore(self, parameters):
"""
Restore the parameters stored with the `store` method.
Useful to validate the model with EMA parameters without affecting the
original optimization process. Store the parameters before the
`copy_to` method. After validation (or model saving), use this to
restore the former parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored parameters.
"""
for c_param, param in zip(self.collected_params, parameters):
param.data.copy_(c_param.data)
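# --- Illustrative usage sketch (added for clarity; not part of the original repository). ---
# A tiny linear layer stands in for the autoencoder; the decay value and the fake weight
# update are arbitrary.
import torch
import torch.nn as nn
from control_net.ldm.modules.ema import LitEma

model = nn.Linear(4, 4)
ema = LitEma(model, decay=0.999)
with torch.no_grad():
    model.weight.add_(0.1)        # pretend an optimizer step happened
ema(model)                         # update the shadow (EMA) copies
ema.store(model.parameters())      # stash the current training weights
ema.copy_to(model)                 # evaluate with EMA weights
ema.restore(model.parameters())    # put the training weights back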
# Path: control_net/ldm/models/autoencoder.py
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
from contextlib import contextmanager
from control_net.ldm.modules.diffusionmodules.model import Encoder, Decoder
from control_net.ldm.modules.distributions.distributions import DiagonalGaussianDistribution
from control_net.ldm.util import instantiate_from_config
from control_net.ldm.modules.ema import LitEma
if self.use_ema:
self.model_ema.restore(self.parameters())
if context is not None:
print(f"{context}: Restored training weights")
def on_train_batch_end(self, *args, **kwargs):
if self.use_ema:
self.model_ema(self)
def encode(self, x):
h = self.encoder(x)
moments = self.quant_conv(h)
posterior = DiagonalGaussianDistribution(moments)
return posterior
def decode(self, z):
z = self.post_quant_conv(z)
dec = self.decoder(z)
return dec
def forward(self, input, sample_posterior=True):
posterior = self.encode(input)
if sample_posterior:
z = posterior.sample()
else:
z = posterior.mode()
dec = self.decode(z)
return dec, posterior
def get_input(self, batch, k):
x = batch[k]
if len(x.shape) == 3:
x = x[..., None]
x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
return x
def training_step(self, batch, batch_idx, optimizer_idx):
inputs = self.get_input(batch, self.image_key)
reconstructions, posterior = self(inputs)
if optimizer_idx == 0:
# train encoder+decoder+logvar
aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train")
self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
return aeloss
if optimizer_idx == 1:
# train the discriminator
discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train")
self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
return discloss
def validation_step(self, batch, batch_idx):
log_dict = self._validation_step(batch, batch_idx)
with self.ema_scope():
log_dict_ema = self._validation_step(batch, batch_idx, postfix="_ema")
return log_dict
def _validation_step(self, batch, batch_idx, postfix=""):
inputs = self.get_input(batch, self.image_key)
reconstructions, posterior = self(inputs)
aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
last_layer=self.get_last_layer(), split="val"+postfix)
discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
last_layer=self.get_last_layer(), split="val"+postfix)
self.log(f"val{postfix}/rec_loss", log_dict_ae[f"val{postfix}/rec_loss"])
self.log_dict(log_dict_ae)
self.log_dict(log_dict_disc)
return self.log_dict
def configure_optimizers(self):
lr = self.learning_rate
ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(
self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())
if self.learn_logvar:
print(f"{self.__class__.__name__}: Learning logvar")
ae_params_list.append(self.loss.logvar)
opt_ae = torch.optim.Adam(ae_params_list,
lr=lr, betas=(0.5, 0.9))
opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
lr=lr, betas=(0.5, 0.9))
return [opt_ae, opt_disc], []
def get_last_layer(self):
return self.decoder.conv_out.weight
@torch.no_grad()
def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):
log = dict()
x = self.get_input(batch, self.image_key)
x = x.to(self.device)
if not only_inputs:
xrec, posterior = self(x)
if x.shape[1] > 3:
# colorize with random projection
assert xrec.shape[1] > 3
x = self.to_rgb(x)
xrec = self.to_rgb(xrec)
log["samples"] = self.decode(torch.randn_like(posterior.sample()))
log["reconstructions"] = xrec
if log_ema or self.use_ema:
with self.ema_scope():
xrec_ema, posterior_ema = self(x)
if x.shape[1] > 3:
# colorize with random projection
assert xrec_ema.shape[1] > 3
xrec_ema = self.to_rgb(xrec_ema)
log["samples_ema"] = self.decode(torch.randn_like(posterior_ema.sample()))
log["reconstructions_ema"] = xrec_ema
log["inputs"] = x
return log
def to_rgb(self, x):
| assert self.image_key == "segmentation" |