file_path
stringlengths 22
162
| content
stringlengths 19
501k
| size
int64 19
501k
| lang
stringclasses 1
value | avg_line_length
float64 6.33
100
| max_line_length
int64 18
935
| alphanum_fraction
float64 0.34
0.93
|
---|---|---|---|---|---|---|
ashleygoldstein/LWM-Warehouse-Scene/Scripts/pallet_collision.py | from omni.kit.scripting import BehaviorScript
import omni
import omni.physx
from pxr import Gf, Sdf, PhysxSchema, UsdGeom, Usd
from omni.physx.scripts import utils
from omni.physx import get_physx_scene_query_interface
from omni.physx import get_physx_interface, get_physx_simulation_interface
from omni.physx.scripts.physicsUtils import *
import carb
class CollisionTest(BehaviorScript):
    """Counts unique prims that collide with this pallet.

    While the script is playing it subscribes to PhysX contact-report events
    and counts each distinct colliding prim exactly once, mirroring the
    running total into a custom ``Collected`` (Int) attribute on the prim so
    other systems can read it from the stage.
    """

    def on_init(self):
        # Prims already counted; further contacts with these are ignored.
        self.ignore_objects = []
        self.pallet_collection = 0
        # Expose the running total on the prim as a custom Int attribute.
        self.collected_attr = self.prim.CreateAttribute('Collected', Sdf.ValueTypeNames.Int)
        self.collected_attr.Set(0)
        self.reset_character()

    def on_play(self):
        '''
        Called on runtime
        '''
        # Fix: original docstring opened with four quotes (''''), which made
        # the quote part of the string. Reset state, then start listening.
        self.reset_character()
        self._contact_report_sub = get_physx_simulation_interface().subscribe_contact_report_events(self._on_contact_report_event)
        contactReportAPI = PhysxSchema.PhysxContactReportAPI.Apply(self.prim)
        contactReportAPI.CreateThresholdAttr().Set(self.contact_thresh)

    def on_stop(self):
        self.on_destroy()

    def on_destroy(self):
        """Tear down the contact subscription and reset the counter."""
        self.pallet = None
        self.collected_attr.Set(0)
        # Guard: on_stop/on_destroy can run before on_play ever subscribed,
        # in which case _contact_report_sub does not exist yet.
        sub = getattr(self, "_contact_report_sub", None)
        if sub is not None:
            sub.unsubscribe()
        self._contact_report_sub = None

    def reset_character(self):
        """Reset all collision-tracking state to its initial values."""
        self.contact_thresh = 1
        self.collected_attr.Set(0)
        self.pallet_collection = 0
        self.ignore_objects = []
        # Assign this pallet as agent instance
        self.pallet = str(self.prim_path)
        # parent_prim = self.stage.GetPrimAtPath(self.package_path)
        # if parent_prim.IsValid():
        #     children = parent_prim.GetAllChildren()
        #     self.package = [str(child.GetPath()) for child in children]

    def on_update(self, current_time: float, delta_time: float):
        """
        Called on every update. Initializes character at start,
        publishes character positions and executes character commands.
        :param float current_time: current time in seconds.
        :param float delta_time: time elapsed since last update.
        """
        return

    def subscribe_to_contact(self):
        # apply contact report
        ### This would be an example of each object managing their own collision
        self._contact_report_sub = get_physx_simulation_interface().subscribe_contact_report_events(self._on_contact_report_event)
        contactReportAPI = PhysxSchema.PhysxContactReportAPI.Apply(self.prim)
        contactReportAPI.CreateThresholdAttr().Set(self.contact_thresh)

    def _on_contact_report_event(self, contact_headers, contact_data):
        """Count each distinct prim that contacts this pallet exactly once."""
        for contact_header in contact_headers:
            collider_1 = str(PhysicsSchemaTools.intToSdfPath(contact_header.actor0))
            collider_2 = str(PhysicsSchemaTools.intToSdfPath(contact_header.actor1))
            contacts = [collider_1, collider_2]
            if self.prim_path in contacts:
                # The "other" prim is whichever actor is not this pallet.
                # (Removed a stray debug print of collider_2 here.)
                if self.prim_path == collider_1:
                    self.object_path = collider_2
                else:
                    self.object_path = collider_1
                if self.object_path in self.ignore_objects:
                    continue
                # First contact with this object: remember it and bump the count.
                self.ignore_objects.append(self.object_path)
                self.pallet_collection += 1
                print(f'Collected: {self.pallet_collection}')
                self.collected_attr.Set(self.pallet_collection)
| 3,729 | Python | 35.213592 | 130 | 0.616251 |
Vadim-Karpenko/omniverse-material-manager-extended/tools/scripts/link_app.py | import os
import argparse
import sys
import json
import packmanapi
import urllib3
def find_omniverse_apps():
    """Query the local Omniverse Launcher for installed apps.

    Returns a dict mapping app slug -> (display name, install root path).
    Exits the process when the Launcher endpoint cannot be reached.
    """
    http = urllib3.PoolManager()
    try:
        r = http.request("GET", "http://127.0.0.1:33480/components")
    except Exception as e:
        print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
        sys.exit(1)

    apps = {}
    for component in json.loads(r.data.decode("utf-8")):
        latest = component.get("installedVersions", {}).get("latest", "")
        if not latest:
            continue
        # Find the settings entry for the latest installed version to get
        # the launch root of that app.
        for settings in component.get("settings", []):
            if settings.get("version", "") == latest:
                root = settings.get("launch", {}).get("root", "")
                apps[component["slug"]] = (component["name"], root)
                break
    return apps
def create_link(src, dst):
    """Create a folder link from *src* to *dst* using the packman API."""
    print(f"Creating a link '{src}' -> '{dst}'")
    packmanapi.link(src, dst)
# Apps to prefer, in order, when the user does not pick one explicitly.
APP_PRIORITIES = ["code", "create", "view"]

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
    parser.add_argument(
        "--path",
        help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
        required=False,
    )
    parser.add_argument(
        "--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
    )
    args = parser.parse_args()

    path = args.path
    if not path:
        # No explicit path given: discover installed apps via the Launcher.
        print("Path is not specified, looking for Omniverse Apps...")
        apps = find_omniverse_apps()
        if len(apps) == 0:
            print(
                "Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
            )
            sys.exit(0)

        print("\nFound following Omniverse Apps:")
        for idx, slug in enumerate(apps):
            name, root = apps[slug]
            print(f"{idx}: {name} ({slug}) at: '{root}'")

        if args.app:
            selected_app = args.app.lower()
            if selected_app not in apps:
                choices = ", ".join(apps.keys())
                print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
                sys.exit(0)
        else:
            # Pick the first priority app that is installed, else any found app.
            selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
            if not selected_app:
                selected_app = next(iter(apps))

        print(f"\nSelected app: {selected_app}")
        _, path = apps[selected_app]

    if not os.path.exists(path):
        print(f"Provided path doesn't exist: {path}")
    else:
        SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
        create_link(f"{SCRIPT_ROOT}/../../app", path)
        print("Success!")
| 2,813 | Python | 32.5 | 133 | 0.562389 |
Vadim-Karpenko/omniverse-material-manager-extended/tools/packman/bootstrap/install_package.py | # Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import shutil
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
    """Context manager yielding a fresh temp directory, deleted on exit."""

    def __init__(self):
        self.path = None

    def __enter__(self):
        self.path = tempfile.mkdtemp()
        return self.path

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Remove temporary data created
        shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
    """Extract a zip package and copy its contents into the destination.

    The archive is unpacked into a scratch directory first so a partially
    extracted package never lands at *package_dst_path*. If the destination
    already exists the installation is aborted with a warning.
    """
    with zipfile.ZipFile(package_src_path, allowZip64=True) as archive:
        with TemporaryDirectory() as scratch_dir:
            archive.extractall(scratch_dir)
            # Recursive copy is needed because both package name and version
            # folder could be missing in the target directory; the scratch
            # dir is cleaned up automatically when the context exits.
            try:
                shutil.copytree(scratch_dir, package_dst_path)
            except OSError:
                logger.warning(
                    "Directory %s already present, packaged installation aborted" % package_dst_path
                )
            else:
                logger.info("Package successfully installed to %s" % package_dst_path)
install_package(sys.argv[1], sys.argv[2])
| 1,888 | Python | 31.568965 | 103 | 0.68697 |
Vadim-Karpenko/omniverse-material-manager-extended/exts/karpenko.materialsmanager.ext/karpenko/materialsmanager/ext/prim_serializer.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["update_property_paths", "get_prim_as_text", "text_to_stage"]
from omni.kit.commands import execute
from pxr import Sdf
from pxr import Tf
from pxr import Usd
from typing import List
from typing import Optional
def _to_layer(text: str) -> Optional[Sdf.Layer]:
    """Create an sdf layer from the given text, or None when it won't parse."""
    header = "#usda 1.0\n"
    if not text.startswith(header):
        text = header + text
    layer = Sdf.Layer.CreateAnonymous("clipboard.usda")
    try:
        imported = layer.ImportFromString(text)
    except Tf.ErrorException:
        return None
    if not imported:
        return None
    return layer
def update_property_paths(prim_spec, old_path, new_path):
    """Recursively rewrite relationship targets and attribute connections
    under *prim_spec*, replacing the *old_path* prefix with *new_path*."""
    if not prim_spec:
        return
    for relationship in prim_spec.relationships:
        relationship.targetPathList.explicitItems = [
            target.ReplacePrefix(old_path, new_path)
            for target in relationship.targetPathList.explicitItems
        ]
    for attribute in prim_spec.attributes:
        attribute.connectionPathList.explicitItems = [
            connection.ReplacePrefix(old_path, new_path)
            for connection in attribute.connectionPathList.explicitItems
        ]
    for child_spec in prim_spec.nameChildren:
        update_property_paths(child_spec, old_path, new_path)
def get_prim_as_text(stage: Usd.Stage, prim_paths: List[Sdf.Path]) -> Optional[str]:
    """Generate a text (usda) serialization of the given prims.

    Flattens the stage, copies each requested prim under an anonymous layer
    (one ``Item_NN`` root per prim), remaps property paths so references
    between the copied prims stay consistent, then exports the layer as text.

    :param stage: stage containing the prims to serialize.
    :param prim_paths: prim paths to copy; returns None when empty.
    :return: usda text for the copied prims, or None.
    """
    if not prim_paths:
        return None
    # TODO: It can be slow in large scenes. Ideally we need to flatten specific prims.
    flatten_layer = stage.Flatten()
    anonymous_layer = Sdf.Layer.CreateAnonymous(prim_paths[0].name + ".usda")
    paths_map = {}
    # enumerate instead of range(len(...)); f-string instead of str.format.
    for i, prim_path in enumerate(prim_paths):
        item_name = f"Item_{i:02d}"
        Sdf.PrimSpec(anonymous_layer, item_name, Sdf.SpecifierDef)
        anonymous_path = Sdf.Path.absoluteRootPath.AppendChild(item_name).AppendChild(prim_path.name)
        # Copy
        Sdf.CopySpec(flatten_layer, prim_path, anonymous_layer, anonymous_path)
        paths_map[prim_path] = anonymous_path
    # Remap internal references from the original locations to the copies.
    for prim in anonymous_layer.rootPrims:
        for source_path, target_path in paths_map.items():
            update_property_paths(prim, source_path, target_path)
    return anonymous_layer.ExportToString()
def text_to_stage(stage: Usd.Stage, text: str, root: Sdf.Path = Sdf.Path.absoluteRootPath) -> bool:
    """
    Convert the given text to the prim and place it to the stage under the
    given root.

    :return: True when the text parsed and was imported, False otherwise.
    """
    source_layer = _to_layer(text)
    if source_layer is None:
        return False
    execute("ImportLayer", layer=source_layer, stage=stage, root=root)
    return True
| 3,123 | Python | 32.956521 | 101 | 0.675312 |
Vadim-Karpenko/omniverse-material-manager-extended/exts/karpenko.materialsmanager.ext/karpenko/materialsmanager/ext/style.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["materialsmanager_window_style"]
import omni.kit.app
import omni.ui as ui
import pathlib
from omni.ui import color as cl
# Absolute path to this extension's install folder, used to build asset URLs
# for the icons referenced in the style dicts below.
EXTENSION_FOLDER_PATH = pathlib.Path(
    omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__)
)
# The main style dict
materialsmanager_window_style = {
    # Placeholder preview image shown for a material entry.
    "Image::material_preview": {
        "image_url": f"{EXTENSION_FOLDER_PATH}/data/icons/[email protected]",
    },
    "Label::main_label": {
        "alignment": ui.Alignment.LEFT_CENTER,
        "color": cl("#a1a1a1"),
        "font_size": 24,
    },
    "Label::main_hint": {
        "alignment": ui.Alignment.CENTER,
        "margin_height": 1,
        "margin_width": 10,
        "font_size": 16,
    },
    "Label::main_hint_small": {
        "alignment": ui.Alignment.CENTER,
        "color": cl("#a1a1a1"),
    },
    "Label::material_name": {
        "alignment": ui.Alignment.LEFT_CENTER,
        "font_size": 14,
    },
    "Label::secondary_label": {
        "alignment": ui.Alignment.LEFT_CENTER,
        "color": cl("#a1a1a1"),
        "font_size": 18,
    },
    "Label::material_counter": {
        "alignment": ui.Alignment.CENTER,
        "margin_height": 1,
        "margin_width": 10,
        "font_size": 14,
    },
}
# The style dict for the viewport widget ui
viewport_widget_style = {
    "Button.Label": {
        "font_size": 30,
    },
    "Button.Label:disabled": {
        "color": cl("#a1a1a1")
    },
    "Button:disabled": {
        "background_color": cl("#4d4d4d"),
    },
    "Button": {
        "alignment": ui.Alignment.BOTTOM,
        "background_color": cl("#666666"),
    },
    "Label::name_label": {
        "alignment": ui.Alignment.CENTER_BOTTOM,
        "font_size": 34,
    }
}
| 2,175 | Python | 27.25974 | 89 | 0.605057 |
Vadim-Karpenko/omniverse-material-manager-extended/exts/karpenko.materialsmanager.ext/karpenko/materialsmanager/ext/extension.py | import asyncio
import base64
import json
import math
import carb
import omni.ext
import omni.kit.commands
import omni.ui as ui
import omni.usd
from omni.kit.viewport.utility import (get_active_viewport_camera_path,
get_active_viewport_window,
get_ui_position_for_prim)
from pxr import Sdf
from .prim_serializer import get_prim_as_text, text_to_stage
from .style import materialsmanager_window_style as _style
from .viewport_ui.widget_info_scene import WidgetInfoScene
class MaterialManagerExtended(omni.ext.IExt):
    """Kit extension that manages material variants for objects in the stage."""

    # Window titles and the Window-menu entry used to toggle the main window.
    WINDOW_NAME = "Material Manager Extended"
    SCENE_SETTINGS_WINDOW_NAME = "Material Manager Settings"
    MENU_PATH = "Window/" + WINDOW_NAME
    def on_startup(self, ext_id):
        """Initialize extension state, build the UI, and hook scene callbacks.

        :param ext_id: id of this extension instance.
        """
        print("[karpenko.materialsmanager.ext] MaterialManagerExtended startup")
        self._usd_context = omni.usd.get_context()
        self._selection = self._usd_context.get_selection()
        # UI frame/window handles; created lazily by the render_* methods.
        self.latest_selected_prim = None
        self.variants_frame_original = None
        self.variants_frame = None
        self.active_objects_frame = None
        self._window = None
        self._window_scenemanager = None
        self.materials_frame = None
        self.main_frame = None
        # Flags that suppress reacting to changes this extension makes itself.
        self.ignore_change = False
        self.ignore_settings_update = False
        self.roaming_timer = None
        self.ext_id = ext_id
        self._widget_info_viewport = None
        self.current_ui = "default"
        self.is_settings_open = False
        self.ignore_next_select = False
        self.last_roaming_prim = None
        self.reticle = None
        self.stage = self._usd_context.get_stage()
        # Only these undo-history command names trigger a refresh in on_change.
        self.allowed_commands = [
            "SelectPrimsCommand",
            "SelectPrims",
            "CreatePrimCommand",
            "DeletePrims",
            "TransformPrimCommand",
            "Undo",
            "BindMaterial",
            "BindMaterialCommand",
            "MovePrims",
            "MovePrim",
        ]
        self.is_settings_window_open = False
        self.render_default_layout()
        # show the window in the usual way if the stage is loaded
        if self.stage:
            self._window.deferred_dock_in("Property")
        else:
            # otherwise, show the window after the stage is loaded
            self._setup_window_task = asyncio.ensure_future(self._dock_window())
        omni.kit.commands.subscribe_on_change(self.on_change)
        # Background task polling for the closest MME object (see enable_roaming_timer).
        self.roaming_timer = asyncio.ensure_future(self.enable_roaming_timer())
    def on_shutdown(self):
        """
        This function is called when the addon is disabled
        """
        # Stop listening for scene changes first so teardown is not re-entered.
        omni.kit.commands.unsubscribe_on_change(self.on_change)
        if self.roaming_timer:
            self.disable_roaming_timer()
        # Deregister the function that shows the window from omni.ui
        ui.Workspace.set_show_window_fn(self.WINDOW_NAME, None)
        if self._window:
            self._window.destroy()
            self._window = None
        # Drop cached handles so stage/UI objects can be released.
        self._selection = None
        self._usd_context = None
        self.latest_selected_prim = None
        self.variants_frame_original = None
        self.variants_frame = None
        self.materials_frame = None
        self.main_frame = None
        if self._widget_info_viewport:
            self._widget_info_viewport.destroy()
            self._widget_info_viewport = None
        if self.reticle:
            self.reticle.destroy()
            self.reticle = None
        print("[karpenko.materialsmanager.ext] MaterialManagerExtended shutdown")
    async def _dock_window(self):
        """
        It waits for the property window to appear, then docks the window to it
        """
        property_win = None
        frames = 3
        # Poll for up to three update frames for the Property window to exist.
        while frames > 0:
            if not property_win:
                property_win = ui.Workspace.get_window("Property")
            if property_win:
                break  # early out
            frames = frames - 1
            await omni.kit.app.get_app().next_update_async()
        # Dock to property window after 5 frames. It's enough for window to appear.
        for _ in range(5):
            await omni.kit.app.get_app().next_update_async()
        if property_win:
            self._window.deferred_dock_in("Property")
        self._setup_window_task = None
    async def enable_roaming_timer(self):
        """Poll once a second for the closest MME object.

        Runs until the owning asyncio task is cancelled.
        """
        while True:
            await asyncio.sleep(1.0)
            self.get_closest_mme_object()
def disable_roaming_timer(self):
self.roaming_timer = None
def get_latest_version(self, looks):
"""
It takes a list of looks, and returns the next available version number
:param looks: The parent folder of the looks
:return: The latest version of the look.
"""
latest_version = 1
versions = []
for look in looks.GetChildren():
look_path = look.GetPath()
if look_path.name.startswith("Look_"):
version = int(look_path.name.split("_")[-1])
versions.append(version)
versions.sort()
for version in versions:
if version != latest_version:
return latest_version
else:
latest_version += 1
return latest_version
    def add_variant(self, looks, parent_prim):
        """
        It creates a new folder under the Looks folder, copies all materials attached to the meshes and re-binds them
        so the user can tweak copies instead of the original ones.

        :param looks: The looks folder
        :param parent_prim: The prim that contains the meshes that need to be assigned the new materials
        """
        # NOTE(review): the passed-in `looks` is immediately replaced by the
        # parent prim's own Looks child, so the parameter value is unused.
        looks = parent_prim.GetPrimAtPath("Looks")
        looks_path = looks.GetPath()
        # Suppress on_change reactions to the commands issued below.
        self.ignore_change = True
        # group all commands so it can be undone at once
        with omni.kit.undo.group():
            all_meshes = self.get_meshes_from_prim(parent_prim)
            all_materials = self.get_data_from_meshes(all_meshes)
            # Check if folder (prim, Scope) MME already exist
            if not looks.GetPrimAtPath("MME"):
                # Create a folder called MME under the looks folder, it will contain all the materials for all variants
                omni.kit.commands.execute(
                    "CreatePrim",
                    prim_path=f"{looks_path}/MME",
                    prim_type="Scope",
                    attributes={},
                    select_new_prim=False
                )
                is_active_attr_path = Sdf.Path(f"{looks_path}/MME.MMEisActive")
                omni.kit.commands.execute(
                    'CreateUsdAttributeOnPath',
                    attr_path=is_active_attr_path,
                    attr_type=Sdf.ValueTypeNames.Bool,
                    custom=True,
                    attr_value=False,
                    variability=Sdf.VariabilityVarying
                )
                # Snapshot the original bindings onto the MME folder itself.
                self.set_mesh_data(all_materials, looks_path, None)
            # Generate a new name for the variant based on the quantity of previous ones
            folder_name = f"Look_{self.get_latest_version(looks.GetPrimAtPath('MME'))}"
            # Create a folder for the new variant
            omni.kit.commands.execute(
                "CreatePrim",
                prim_path=f"{looks_path}/MME/{folder_name}",
                prim_type="Scope",
                attributes={},
                select_new_prim=False
            )
            is_active_attr_path = Sdf.Path(f"{looks_path}/MME/{folder_name}.MMEisActive")
            omni.kit.commands.execute(
                'CreateUsdAttributeOnPath',
                attr_path=is_active_attr_path,
                attr_type=Sdf.ValueTypeNames.Bool,
                custom=True,
                variability=Sdf.VariabilityVarying
            )
            if folder_name is None:
                new_looks_folder = looks
            else:
                new_looks_folder = looks.GetPrimAtPath(f"MME/{folder_name}")
            new_looks_folder_path = new_looks_folder.GetPath()
            materials_to_copy = [mat_data["path"] for mat_data in all_materials]
            # remove duplicates
            materials_to_copy = list(set(materials_to_copy))
            # Copy material's prim as text
            usd_code = get_prim_as_text(self.stage, materials_to_copy)
            # put the clone material into the scene
            text_to_stage(self.stage, usd_code, new_looks_folder_path)
            # Bind the cloned materials and remember them for this variant.
            self.bind_materials(all_materials, new_looks_folder_path)
            self.set_mesh_data(all_materials, looks_path, folder_name)
            self.deactivate_all_variants(looks)
            # Set current variant as active
            omni.kit.commands.execute(
                'ChangeProperty',
                prop_path=is_active_attr_path,
                value=True,
                prev=False,
            )
        self.ignore_change = False
        if not self.ignore_settings_update:
            self.render_active_objects_frame()
        self.render_variants_frame(looks, parent_prim)
def get_meshes_from_prim(self, parent_prim):
"""
It takes a parent prim and returns a list of all the meshes that are children of that prim
:param parent_prim: The parent prim of the mesh you want to get
:return: A list of all meshes in the scene.
"""
all_meshes = []
for mesh in self.get_all_children_of_prim(parent_prim):
if mesh.GetTypeName() == "Mesh":
all_meshes.append(mesh)
return all_meshes
def get_data_from_meshes(self, all_meshes):
"""
It loops through all passed meshes, gets the materials that are bound to them, and returns a list of
dictionaries containing the material name, path, and the mesh it's bound to
:param all_meshes: a list of all the meshes in the scene
:return: A list of dictionaries.
"""
result = []
# loop through all meshes
for mesh_data in all_meshes:
# Get currently binded materials for the current mesh
current_material_prims = mesh_data.GetRelationship('material:binding').GetTargets()
# Loop through all binded materials paths
for original_material_prim_path in current_material_prims:
original_material_prim = self.stage.GetPrimAtPath(original_material_prim_path)
if not original_material_prim:
continue
result.append({
"name": original_material_prim.GetName(),
"path": original_material_prim_path,
"mesh": mesh_data.GetPath(),
})
return result
    def bind_materials(self, all_materials, variant_folder_path):
        """
        Look through all the materials and bind them to the meshes.
        If variant_folder_path is empty, then just binds passed materials. If not, looks for the materials in the
        variant folder and binds them instead using all_materials as a reference.

        :param all_materials: A list of dictionaries containing the material path and the mesh path
        :param variant_folder_path: The path to the variant folder
        """
        # Check if there is a variant folder where new materials are stored
        if variant_folder_path:
            variant_materials_prim = self.stage.GetPrimAtPath(variant_folder_path)
        # Group the binds so the whole operation is a single undo step.
        with omni.kit.undo.group():
            # loop through all passed materials
            for mat_data in all_materials:
                if variant_folder_path and variant_materials_prim:
                    # loop through all materials in the variant folder
                    for var_mat in variant_materials_prim.GetChildren():
                        # If found material matches with the one in the all_materials list, bind it to the mesh
                        # (match is by the last path component, i.e. the material name).
                        if var_mat.GetName() == str(mat_data["path"]).split("/")[-1]:
                            omni.kit.commands.execute(
                                "BindMaterialCommand",
                                prim_path=mat_data["mesh"],
                                material_path=var_mat.GetPath(),
                                strength=['weakerThanDescendants']
                            )
                            break
                else:
                    if mat_data["mesh"] and mat_data["path"]:
                        # If there's no variant folder, then just bind passed material to the mesh
                        omni.kit.commands.execute(
                            'BindMaterialCommand',
                            material_path=mat_data["path"],
                            prim_path=mat_data["mesh"],
                            strength=['weakerThanDescendants']
                        )
def deactivate_all_variants(self, looks):
"""
It deactivates all variants in a given looks prim
:param looks: The looks prim
"""
looks_path = looks.GetPath()
mme_folder = looks.GetPrimAtPath("MME")
# Check if mme folder exists
if mme_folder:
# MMEisActive also present in MME folder, so we need to set it to False as well.
mme_folder_prop_path = Sdf.Path(f"{looks_path}/MME.MMEisActive")
mme_is_active = self.stage.GetAttributeAtPath(mme_folder_prop_path).Get()
if mme_is_active:
omni.kit.commands.execute(
'ChangeProperty',
prop_path=mme_folder_prop_path,
value=False,
prev=True,
)
# Loop through all variants in the MME folder and deactivate them
for look in mme_folder.GetChildren():
p_type = look.GetTypeName()
if p_type == "Scope":
look_is_active_path = Sdf.Path(f"{looks_path}/MME/{look.GetName()}.MMEisActive")
look_is_active = self.stage.GetAttributeAtPath(look_is_active_path).Get()
if look_is_active:
omni.kit.commands.execute(
'ChangeProperty',
prop_path=look_is_active_path,
value=False,
prev=True,
)
def get_parent_from_mesh(self, mesh_prim):
"""
It takes a mesh prim as an argument and returns the first Xform prim it finds in the prim's ancestry
:param mesh_prim: The mesh prim you want to get the parent of
:return: The parent of the mesh_prim.
"""
parent_prim = mesh_prim.GetParent()
default_prim = self.stage.GetDefaultPrim()
if not default_prim:
return
default_prim_name = default_prim.GetName()
rootname = f"/{default_prim_name}"
while True:
if parent_prim is None or parent_prim.IsPseudoRoot():
return parent_prim
if str(parent_prim.GetPath()) == "/" or str(parent_prim.GetPath()) == rootname:
return None
if parent_prim.GetPrimAtPath("Looks") and str(parent_prim.GetPath()) != rootname:
return parent_prim
parent_prim = parent_prim.GetParent()
return parent_prim
def get_looks_folder(self, parent_prim):
"""
If the parent_prim has a child prim named "Looks", return that found prim. Otherwise, return None
:param parent_prim: The parent prim of the looks folder
:return: The looks folder if it exists, otherwise None.
"""
if not parent_prim:
return None
looks_folder = parent_prim.GetPrimAtPath("Looks")
return looks_folder if looks_folder else None
def check_if_original_active(self, mme_folder):
"""
If the folder has an attribute called "MMEisActive" and it's value is True, return the folder and True.
Otherwise, return the folder and False
:param mme_folder: The folder that contains the MME data
:return: the mme_folder and a boolean value.
"""
if mme_folder:
mme_is_active_attr = mme_folder.GetAttribute("MMEisActive")
if mme_is_active_attr and mme_is_active_attr.Get():
return mme_folder, True
return mme_folder, False
def get_currently_active_folder(self, looks):
"""
It looks for a folder called "MME" in the "Looks" folder, and if it finds it, it search for a folder inside
with an attribute MMEisActive set to True and returns it if it finds one.
If it doesn't find one, it returns None.
:param looks: The looks node
:return: The currently active folder.
"""
mme_folder = looks.GetPrimAtPath("MME")
if mme_folder:
if mme_folder.GetTypeName() == "Scope":
for look in mme_folder.GetChildren():
if look.GetTypeName() == "Scope":
is_active_attr = look.GetAttribute("MMEisActive")
if is_active_attr and is_active_attr.Get():
return look
return None
    def update_material_data(self, latest_action):
        """
        It updates the material data in the looks folder when a material is changed using data from the latest action.
        All data is converted into string and encoded into base64 to discourage direct
        modification by the user.

        :param latest_action: The latest action that was performed in the scene
        :return: None
        """
        # Only BindMaterial-style actions carry both of these kwargs.
        if "prim_path" not in latest_action.kwargs or "material_path" not in latest_action.kwargs:
            return
        prim_path = latest_action.kwargs["prim_path"]
        if not prim_path:
            return
        if type(prim_path) == list:
            prim_path = prim_path[0]
        new_material_path = latest_action.kwargs["material_path"]
        if not new_material_path:
            return
        if type(new_material_path) == list:
            new_material_path = new_material_path[0]
        parent_mesh = self.get_parent_from_mesh(self.stage.GetPrimAtPath(prim_path))
        looks = self.get_looks_folder(parent_mesh)
        if looks:
            looks_path = looks.GetPath()
            mme_folder, is_original_active = self.check_if_original_active(looks.GetPrimAtPath("MME"))
            if is_original_active:
                # folder_name None means "original" bindings stored on MME itself.
                folder_name = None
            else:
                active_folder = self.get_currently_active_folder(looks)
                if not active_folder:
                    return
                folder_name = active_folder.GetName()
            mesh_data = self.get_mesh_data(looks_path, folder_name)
            mesh_data_to_update = []
            previous_mats = []
            unique_mats = []
            mesh_mats = {}
            if mesh_data:
                for mat_data in mesh_data:
                    material_prim = self.stage.GetPrimAtPath(new_material_path)
                    mat_name = material_prim.GetName()
                    if mat_data["mesh"] == prim_path and mat_data["path"] != new_material_path:
                        carb.log_warn("Material changes detected. Updating material data...")
                        # Track materials bound to more than one mesh so their
                        # copies are not deleted below.
                        if mat_data["path"] in previous_mats:
                            unique_mats.append(mat_data["path"])
                            mesh_mats[mat_name] = True
                        else:
                            mesh_mats[mat_name] = False
                        previous_mats.append(mat_data["path"])
                        mat_data["path"] = new_material_path
                        mesh_data_to_update.append(mat_data)
                    else:
                        mesh_mats[mat_name] = False
                if not is_original_active and folder_name:
                    active_folder_path = active_folder.GetPath()
                    # Copy material's prim as text
                    usd_code = get_prim_as_text(
                        self.stage,
                        [Sdf.Path(i["path"]) for i in mesh_data if i["path"] not in unique_mats]
                    )
                    # Remove copies of materials that are no longer referenced.
                    mats_to_delete = [i.GetPath() for i in active_folder.GetChildren() if str(i.GetPath()) not in unique_mats and not mesh_mats.get(i.GetName(), False)]
                    if mats_to_delete:
                        omni.kit.commands.execute(
                            'DeletePrims',
                            paths=mats_to_delete,
                        )
                    # put the clone material into the scene
                    text_to_stage(self.stage, usd_code, active_folder_path)
                    # Re-bind without triggering on_change recursion.
                    self.ignore_change = True
                    self.bind_materials(mesh_data_to_update, active_folder_path)
                    self.ignore_change = False
                    self.set_mesh_data(mesh_data, looks_path, folder_name)
        self.render_current_materials_frame(parent_mesh)
    def on_change(self):
        """
        Everytime the user changes the scene, this method is called.
        Method does the following:
        It checks if the user has changed the material, and if so, it updates the material data in the apropriate
        variant folder or save it into the MME folder if the variant is set to "original".
        It checks if the selected object has a material, and if it does, it renders a new window with the material's
        properties of the selected object.
        If the selected object doesn't have a material, it renders a new window with a prompt to select an object with
        a material.
        :return: None
        """
        # Lazily (re)acquire the USD context if the stage was not ready yet.
        if not self.stage:
            self._usd_context = omni.usd.get_context()
            self._selection = self._usd_context.get_selection()
            self.stage = self._usd_context.get_stage()
        # Get history of commands
        current_history = reversed(omni.kit.undo.get_history().values())
        # Get the latest one
        try:
            latest_action = next(current_history)
        except StopIteration:
            return
        # Skip wrapper entries and look at the command underneath.
        if latest_action.name == "ChangePrimVarCommand" and latest_action.level == 1:
            latest_action = next(current_history)
        if self.ignore_next_select and latest_action.name == "SelectPrimsCommand":
            self.ignore_next_select = False
            omni.kit.commands.execute('Undo')
        else:
            self.ignore_next_select = False
        if latest_action.name not in self.allowed_commands:
            return
        # To skip the changes made by the addon
        if self.ignore_change:
            return
        show_default_layout = True
        # Material rebinds are handled separately and end the update here.
        if latest_action.name in ["BindMaterial", "BindMaterialCommand"]:
            self.update_material_data(latest_action)
            return
        # Get the top-level prim (World)
        default_prim = self.stage.GetDefaultPrim()
        if not default_prim:
            return
        default_prim_name = default_prim.GetName()
        rootname = f"/{default_prim_name}"
        # Get currently selected prim
        paths = self._selection.get_selected_prim_paths()
        if paths:
            # Get path of the first selected prim
            base_path = paths[0] if len(paths) > 0 else None
            base_path = Sdf.Path(base_path) if base_path else None
            if base_path:
                # Skip if the prim is the root of the stage to avoid unwanted errors
                if base_path == rootname:
                    return
                # Skip if this object was already selected previously. Protection from infinite loop.
                if base_path == self.latest_selected_prim:
                    return
                # Save the path of the currently selected prim for the next iteration
                self.latest_selected_prim = base_path
                # Get prim from path
                prim = self.stage.GetPrimAtPath(base_path)
                if prim:
                    p_type = prim.GetTypeName()
                    # This is needed to successfully get the prim even if it's child was selected
                    if p_type == "Mesh" or p_type == "Scope" or p_type == "Material":
                        prim = self.get_parent_from_mesh(prim)
                    elif p_type == "Xform":
                        # Current prim is already parental one, so we don't need to do anything.
                        pass
                    else:
                        # In case if something unexpected is selected, we just return None
                        carb.log_warn(f"Selected {prim} does not has any materials or has invalid type.")
                        return
                    if not prim:
                        self.render_scenelevel_frame()
                        return
                    if prim.GetPrimAtPath("Looks") and prim != self.latest_selected_prim:
                        # Save the type of the rendered window
                        self.current_ui = "object"
                        # Render new window for the selected prim
                        self.render_objectlevel_frame(prim)
                        if not self.ignore_settings_update:
                            self.render_active_objects_frame()
                        show_default_layout = False
        # Fall back to the scene-level layout when nothing object-specific was shown.
        if show_default_layout and self.current_ui != "default":
            self.current_ui = "default"
            self.render_scenelevel_frame()
            if not self.ignore_settings_update:
                self.render_active_objects_frame()
            self.latest_selected_prim = None
def _get_looks(self, path):
"""
It gets the prim at the path, checks if it's a mesh, scope, or material, and if it is, it gets the parent prim.
If it's an xform, it does nothing. If it's something else, it returns None
:param path: The path to the prim you want to get the looks from
:return: The prim and the looks.
"""
prim = self.stage.GetPrimAtPath(path)
p_type = prim.GetTypeName()
# User could select not the prim directly but sub-items of it, so we need to make sure in any scenario
# we will get the parent prim.
if p_type == "Mesh" or p_type == "Scope" or p_type == "Material":
prim = self.get_parent_from_mesh(prim)
elif p_type == "Xform":
# Current prim is already parental one, so we don't need to do anything.
pass
else:
# In case if something unexpected is selected, we just return None
carb.log_error("No selected prim")
return None, None
# Get all looks (a.k.a. materials)
looks = prim.GetPrimAtPath("Looks").GetChildren()
# return a parental prim object and its looks
return prim, looks
def get_all_materials_variants(self, looks_prim):
"""
It returns a list of all the variants in the MME folder
:param looks_prim: The prim that contains the MME folder
:return: A list of all the variants in the MME folder.
"""
variants = []
mme_folder = looks_prim.GetPrimAtPath("MME")
if mme_folder:
for child in mme_folder.GetChildren():
if child.GetTypeName() == "Scope":
variants.append(child)
return variants
def get_mesh_data(self, looks_path, folder_name):
"""
It gets the mesh data from the folder you pass as a parameter.
It does decode it back from base64 and returns it as a dictionary.
:param looks_path: The path to the looks prim
:param folder_name: The name of the folder that contains the mesh data
:return: A list of dictionaries.
"""
if folder_name:
data_attr_path = Sdf.Path(f"{looks_path}/MME/{folder_name}.MMEMeshData")
else:
data_attr_path = Sdf.Path(f"{looks_path}/MME.MMEMeshData")
data_attr = self.stage.GetAttributeAtPath(data_attr_path)
if data_attr:
attr_value = data_attr.Get()
if attr_value:
result = []
# decode base64 string and load json
for item in attr_value:
result.append(json.loads(base64.b64decode(item).decode("utf-8")))
return result
def set_mesh_data(self, mesh_materials, looks_path, folder_name):
"""
It creates a custom attribute on a USD prim, and sets the value of that attribute to a list of base64
encoded JSON strings
:param mesh_materials: A list of dictionaries containing the following keys: path, mesh
:param looks_path: The path to the looks prim
:param folder_name: The name of the folder that contains the mesh data
"""
# Convert every Path to string in mesh_materials to be able to pass it into JSON
all_materials = [{
"path": str(mat_data["path"]),
"mesh": str(mat_data["mesh"]),
} for mat_data in mesh_materials]
if folder_name:
data_attr_path = Sdf.Path(f"{looks_path}/MME/{folder_name}.MMEMeshData")
else:
data_attr_path = Sdf.Path(f"{looks_path}/MME.MMEMeshData")
omni.kit.commands.execute(
'CreateUsdAttributeOnPath',
attr_path=data_attr_path,
attr_type=Sdf.ValueTypeNames.StringArray,
custom=True,
variability=Sdf.VariabilityVarying,
attr_value=[base64.b64encode(json.dumps(i).encode()) for i in all_materials],
)
def delete_variant(self, prim_path, looks, parent_prim):
"""
It deletes the variant prim and then re-renders the variants frame
:param prim_path: The path to the variant prim you want to delete
:param looks: a list of all the looks in the current scene
:param parent_prim: The prim path of the parent prim of the variant set
"""
omni.kit.commands.execute('DeletePrims', paths=[prim_path, ])
self.render_variants_frame(looks, parent_prim)
def enable_variant(self, folder_name, looks, parent_prim, ignore_changes=True, ignore_select=False):
"""
It takes a folder name, a looks prim, and a parent prim, and then it activates the variant in the folder,
binds the materials in the variant, and renders the variant and current materials frames
:param folder_name: The name of the folder that contains the materials you want to enable
:param looks: the looks prim
:param parent_prim: The prim that contains the variant sets
"""
if ignore_changes:
self.ignore_change = True
if folder_name is None:
new_looks_folder = looks.GetPrimAtPath("MME")
else:
new_looks_folder = looks.GetPrimAtPath(f"MME/{folder_name}")
new_looks_folder_path = new_looks_folder.GetPath()
all_materials = self.get_mesh_data(looks.GetPath(), folder_name)
self.deactivate_all_variants(looks)
is_active_attr_path = Sdf.Path(f"{new_looks_folder_path}.MMEisActive")
omni.kit.commands.execute(
'ChangeProperty',
prop_path=is_active_attr_path,
value=True,
prev=False,
)
self.bind_materials(all_materials, None if folder_name is None else new_looks_folder_path)
if ignore_select:
self.ignore_next_select = True
self.render_variants_frame(looks, parent_prim, ignore_widget=True)
self.render_current_materials_frame(parent_prim)
if ignore_changes:
self.ignore_change = False
def select_material(self, associated_mesh):
"""
It selects the material of the mesh that is currently selected in the viewport
:param associated_mesh: The path to the mesh you want to select the material for
"""
if associated_mesh:
mesh = self.stage.GetPrimAtPath(associated_mesh)
if mesh:
current_material_prims = mesh.GetRelationship('material:binding').GetTargets()
if current_material_prims:
omni.usd.get_context().get_selection().set_prim_path_selected(
str(current_material_prims[0]), True, True, True, True)
ui.Workspace.show_window("Property", True)
property_window = ui.Workspace.get_window("Property")
ui.WindowHandle.focus(property_window)
    def render_variants_frame(self, looks, parent_prim, ignore_widget=False):
        """
        Render the variants UI: an "Original" pseudo-variant frame, one
        collapsable entry per saved variant, and (optionally) the viewport
        widget used for quick switching.

        :param looks: The "Looks" prim whose MME variants should be listed
        :param parent_prim: The prim that contains the variants
        :param ignore_widget: When True, do not rebuild the viewport widget
        :return: Tuple of (variants_frame_original, variants_frame)
        """
        # Checking if any of the variants are active.
        is_variants_active = False
        all_variants = self.get_all_materials_variants(looks)
        for variant_prim in all_variants:
            is_active_attr = variant_prim.GetAttribute("MMEisActive")
            if is_active_attr:
                if is_active_attr.Get():
                    is_variants_active = True
                    break
        # When no variant is active the original materials are in use, so the
        # "Original" entry gets the ' (Active)' suffix.
        active_status = '' if is_variants_active else ' (Active)'
        # Creating a frame in the UI. The frame object is cached on self so a
        # later call only refreshes its contents instead of recreating it.
        if not self.variants_frame_original:
            self.variants_frame_original = ui.Frame(
                name="variants_frame_original",
                identifier="variants_frame_original"
            )
        with self.variants_frame_original:
            with ui.CollapsableFrame(f"Original{active_status}",
                                     height=ui.Pixel(10),
                                     collapsed=is_variants_active):
                with ui.VStack():
                    ui.Label("Your original, unmodified materials. Cannot be deleted.", name="variant_label", height=40)
                    if is_variants_active:
                        with ui.HStack():
                            ui.Button(
                                "Enable",
                                name="variant_button",
                                clicked_fn=lambda: self.enable_variant(None, looks, parent_prim))
        if not self.variants_frame:
            self.variants_frame = ui.Frame(name="variants_frame", identifier="variants_frame")
        with self.variants_frame:
            with ui.VStack(height=ui.Pixel(10)):
                for variant_prim in all_variants:
                    # Creating a functions that will be called later in this loop.
                    prim_name = variant_prim.GetName()
                    prim_path = variant_prim.GetPath()
                    is_active_attr = variant_prim.GetAttribute("MMEisActive")
                    if is_active_attr:
                        # Checking if the attribute is_active_attr is active.
                        is_active = is_active_attr.Get()
                        active_status = ' (Active)' if is_active else ''
                        with ui.CollapsableFrame(f"{variant_prim.GetName()}{active_status}",
                                                 height=ui.Pixel(10),
                                                 collapsed=not is_active):
                            with ui.VStack(height=ui.Pixel(10)):
                                with ui.HStack():
                                    if not active_status:
                                        # Default-argument binding (p_name=prim_name,
                                        # p_path=prim_path) captures the current loop
                                        # values; a bare closure would late-bind.
                                        ui.Button(
                                            "Enable",
                                            name="variant_button",
                                            clicked_fn=lambda p_name=prim_name: self.enable_variant(
                                                p_name,
                                                looks,
                                                parent_prim
                                            ))
                                        ui.Button(
                                            "Delete",
                                            name="variant_button",
                                            clicked_fn=lambda p_path=prim_path: self.delete_variant(
                                                p_path,
                                                looks,
                                                parent_prim
                                            ))
                                    else:
                                        label_text = "This variant is enabled.\nMake changes to the active materials" \
                                            "from above to edit this variant.\nAll changes will be saved automatically."
                                        ui.Label(label_text, name="variant_label", height=40)
        if not ignore_widget and self.get_setting("MMEEnableViewportUI"):
            # Tear down the previous viewport widget before rebuilding it.
            if hasattr(self, "_widget_info_viewport") and self._widget_info_viewport:
                self._widget_info_viewport.destroy()
                self._widget_info_viewport = None
            if len(all_variants) > 0:
                # Get the active viewport (which at startup is the default Viewport)
                viewport_window = get_active_viewport_window()
                # Issue an error if there is no Viewport
                if not viewport_window:
                    carb.log_warn(f"No Viewport Window to add {self.ext_id} scene to")
                    self._widget_info_viewport = None
                    return
                if hasattr(self, "ext_id"):
                    print("ext_id", self.ext_id)
                    # Build out the scene
                    self._widget_info_viewport = WidgetInfoScene(
                        viewport_window,
                        self.ext_id,
                        all_variants=all_variants,
                        enable_variant=self.enable_variant,
                        looks=looks,
                        check_visibility=self.get_setting,
                        parent_prim=parent_prim
                    )
        return self.variants_frame_original, self.variants_frame
def get_closest_mme_object(self):
"""
If the user has enabled the roaming mode, then we get the camera position and the list of all visible MME objects.
We then find the closest MME object to the camera and render the widget for that object.
:return: The closest prim to the currently active camera.
"""
if not self.get_setting("MMEEnableRoamingMode", False):
return False
camera_prim = self.stage.GetPrimAtPath(get_active_viewport_camera_path())
camera_position = camera_prim.GetAttribute("xformOp:translate").Get()
window = get_active_viewport_window()
mme_objects = self.get_mme_valid_objects_on_stage()
all_visible_prims = []
for prim in mme_objects:
ui_position, is_visible = get_ui_position_for_prim(window, prim.GetPath())
if is_visible:
all_visible_prims.append(prim)
closest_prim = None
closest_distance = 0
for prim in all_visible_prims:
prim_position = prim.GetAttribute("xformOp:translate").Get()
distance = math.sqrt(
(prim_position[0] - camera_position[0]) ** 2 + (prim_position[1] - camera_position[1]) ** 2 + (prim_position[2] - camera_position[2]) ** 2
)
if closest_distance > self.get_setting("MMEMaxVisibleDistance", 500):
closest_prim = None
continue
if not closest_prim:
closest_prim = prim
closest_distance = distance
elif distance < closest_distance:
closest_prim = prim
closest_distance = distance
if not hasattr(self, "last_roaming_prim"):
self.last_roaming_prim = closest_prim
return
if closest_distance > 0 and closest_prim and self.last_roaming_prim != closest_prim:
self.last_roaming_prim = closest_prim
self.render_objectlevel_frame(closest_prim)
if hasattr(self, "_widget_info_viewport") and self._widget_info_viewport:
self._widget_info_viewport.info_manipulator.model._on_kit_selection_changed()
elif not closest_prim:
if hasattr(self, "latest_selected_prim") and self.latest_selected_prim:
return
self.last_roaming_prim = None
self.render_scenelevel_frame()
if hasattr(self, "_widget_info_viewport") and self._widget_info_viewport:
self._widget_info_viewport.destroy()
return closest_prim
def get_all_children_of_prim(self, prim):
"""
It takes a prim as an argument and returns a list of all the prims that are children of that prim
:param prim: The prim you want to get the children of
:return: A list of all the children of the prim.
"""
children = []
for child in prim.GetChildren():
children.append(child)
children.extend(self.get_all_children_of_prim(child))
return children
    def render_current_materials_frame(self, prim):
        """
        Render the list of materials currently bound to the meshes of the
        selected prim: one row per unique material with a Select button.

        :param prim: The prim whose child meshes are inspected
        :return: The cached/new materials_frame (ui.Frame)
        """
        all_meshes = []
        all_mat_paths = []
        # Get all meshes
        for mesh in self.get_all_children_of_prim(prim):
            if mesh.GetTypeName() == "Mesh":
                material_paths = mesh.GetRelationship('material:binding').GetTargets()
                all_meshes.append({"mesh": mesh, "material_paths": material_paths})
                for original_material_prim_path in material_paths:
                    all_mat_paths.append(original_material_prim_path)
        # dict.fromkeys de-duplicates the paths while preserving order.
        materials_quantity = len(list(dict.fromkeys(all_mat_paths)))
        processed_materials = []
        # Adapt the layout to the number of materials: smaller scrolling area
        # for few materials, two columns for many.
        scrolling_frame_height = ui.Percent(80)
        materials_column_count = 1
        if materials_quantity < 2:
            scrolling_frame_height = ui.Percent(50)
        elif materials_quantity < 4:
            scrolling_frame_height = ui.Percent(70)
        elif materials_quantity > 6:
            materials_column_count = 2
            scrolling_frame_height = ui.Percent(100)
        # The frame is cached on self so later calls only refresh its content.
        if not self.materials_frame:
            self.materials_frame = ui.Frame(name="materials_frame", identifier="materials_frame")
        with self.materials_frame:
            with ui.ScrollingFrame(height=scrolling_frame_height):
                with ui.VGrid(column_count=materials_column_count, height=ui.Pixel(10)):
                    material_counter = 1
                    # loop through all meshes
                    for mesh_data in all_meshes:
                        # Default argument captures the mesh path at definition
                        # time (avoids the late-binding closure pitfall).
                        def sl_mat_fn(mesh_path=mesh_data["mesh"].GetPath()):
                            return self.select_material(mesh_path)
                        # Get currently binded materials for the current mesh
                        current_material_prims = mesh_data["material_paths"]
                        # Loop through all binded materials paths
                        for original_material_prim_path in current_material_prims:
                            # Skip materials already rendered for another mesh.
                            if original_material_prim_path in processed_materials:
                                continue
                            # Get the material prim from path
                            original_material_prim = self.stage.GetPrimAtPath(original_material_prim_path)
                            if not original_material_prim:
                                continue
                            with ui.HStack():
                                if materials_column_count == 1:
                                    ui.Spacer(height=10, width=10)
                                ui.Label(
                                    f"{material_counter}.",
                                    name="material_counter",
                                    width=20 if materials_column_count == 1 else 50,
                                )
                                ui.Image(
                                    height=24,
                                    width=24,
                                    name="material_preview",
                                    fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT
                                )
                                if materials_column_count == 1:
                                    ui.Spacer(height=10, width=10)
                                ui.Label(
                                    original_material_prim.GetName(),
                                    elided_text=True,
                                    name="material_name"
                                )
                                ui.Button(
                                    "Select",
                                    name="variant_button",
                                    width=ui.Percent(30),
                                    clicked_fn=sl_mat_fn,
                                )
                                material_counter += 1
                                processed_materials.append(original_material_prim_path)
                    if len(all_mat_paths) == 0:
                        ui.Label(
                            "No materials were found. Please make sure that the selected model is valid.",
                            name="main_hint",
                            height=30
                        )
        ui.Spacer(height=10)
        return self.materials_frame
    def render_objectlevel_frame(self, prim):
        """
        Render the object-level window content: the list of active materials
        and the list of variants for the given prim, plus the button to add a
        new variant.

        :param prim: The prim that is currently selected in the viewport
        :return: None when prim is falsy; otherwise builds self.main_frame
        """
        if not prim:
            return
        looks = prim.GetPrimAtPath("Looks")
        # Reset the cached sub-frames so they are rebuilt for the new prim;
        # the hasattr guards keep this safe on the very first call.
        if not hasattr(self, "variants_frame") or self.variants_frame:
            self.variants_frame = None
        if not hasattr(self, "variants_frame_original") or self.variants_frame_original:
            self.variants_frame_original = None
        if not hasattr(self, "materials_frame") or self.materials_frame:
            self.materials_frame = None
        if not hasattr(self, "main_frame") or not self.main_frame:
            self.main_frame = ui.Frame(name="main_frame", identifier="main_frame")
        with self.main_frame:
            with ui.VStack(style=_style):
                # Header with the prim name.
                with ui.HStack(height=ui.Pixel(10), name="label_container"):
                    ui.Spacer(width=10)
                    ui.Label(prim.GetName(), name="main_label", height=ui.Pixel(10))
                ui.Spacer(height=6)
                ui.Separator(height=6)
                ui.Spacer(height=10)
                with ui.HStack(height=ui.Pixel(30)):
                    ui.Spacer(width=10)
                    ui.Label("Active materials", name="secondary_label")
                self.render_current_materials_frame(prim)
                with ui.HStack(height=ui.Pixel(30)):
                    ui.Spacer(width=10)
                    ui.Label("All variants", name="secondary_label")
                with ui.ScrollingFrame():
                    with ui.VStack():
                        self.render_variants_frame(looks, prim)
                        ui.Spacer(height=10)
                ui.Button(
                    "Add new variant",
                    height=30,
                    clicked_fn=lambda: self.add_variant(looks, prim),
                    alignment=ui.Alignment.CENTER_BOTTOM,
                    tooltip="Create a new variant, based on the current look",
                )
def open_scene_settings(self):
"""
If the settings window is not open, render the settings layout, set the settings window to open, and then
show the settings window. If the settings window is open, render the active objects frame, and then show
the settings window
"""
if not self.is_settings_open:
self.render_scene_settings_layout(dock_in=True)
self.is_settings_open = True
else:
self.render_active_objects_frame()
ui.Workspace.show_window(self.SCENE_SETTINGS_WINDOW_NAME, True)
scene_settings_window = ui.Workspace.get_window(self.SCENE_SETTINGS_WINDOW_NAME)
ui.WindowHandle.focus(scene_settings_window)
    def render_scenelevel_frame(self):
        """
        Render the default window content shown when nothing is selected: a
        hint label plus a button that opens the settings window.

        :return: The cached/new main_frame (ui.Frame)
        """
        # The frame is cached on self so later calls only refresh its content.
        if not hasattr(self, "main_frame") or not self.main_frame:
            self.main_frame = ui.Frame(name="main_frame", identifier="main_frame")
        with self.main_frame:
            with ui.VStack(style=_style):
                # Surrounding spacers vertically center the hint block.
                ui.Spacer()
                with ui.VStack():
                    ui.Label("Please select any object to see its materials", name="main_hint", height=30)
                    ui.Label("or", name="main_hint_small", height=10)
                    ui.Spacer(height=5)
                    with ui.HStack(height=ui.Pixel(10)):
                        ui.Spacer()
                        ui.Button(
                            "Open settings",
                            height=20,
                            width=150,
                            name="open_mme_settings",
                            clicked_fn=self.open_scene_settings,
                        )
                        ui.Spacer()
                ui.Spacer()
        return self.main_frame
def render_default_layout(self, prim=None):
"""
It's a function that renders a default layout for the UI
:param prim: The prim that is selected in the viewport
"""
if self.main_frame:
self.main_frame = None
if self.variants_frame:
self.variants_frame = None
if self.variants_frame_original:
self.variants_frame_original = None
if self._window:
self._window.destroy()
self._window = None
self._window = ui.Window(self.WINDOW_NAME, width=300, height=300)
with self._window.frame:
if not prim:
self.render_scenelevel_frame()
else:
self.render_objectlevel_frame(prim)
# SCENE SETTINGS
def is_MME_exists(self, prim):
"""
A recursive method that checks if the prim has a MME prim in its hierarchy
:param prim: The prim to check
:return: A boolean value.
"""
for child in prim.GetChildren():
if child.GetName() == "Looks":
if child.GetPrimAtPath("MME"):
return True
else:
return False
if self.is_MME_exists(child):
return True
return False
def get_mme_valid_objects_on_stage(self):
"""
Returns a list of valid objects on the stage.
"""
if not self.stage:
return []
valid_objects = []
default_prim = self.stage.GetDefaultPrim()
# Get all objects and check if it has Looks folder
for obj in default_prim.GetAllChildren():
if obj:
if self.is_MME_exists(obj):
valid_objects.append(obj)
return valid_objects
def select_prim(self, prim_path):
"""
It selects the prim at the given path, shows the property window, and focuses it
:param prim_path: The path to the prim you want to select
"""
self.ignore_settings_update = True
omni.kit.commands.execute(
'SelectPrimsCommand',
old_selected_paths=[],
new_selected_paths=[str(prim_path), ],
expand_in_stage=True
)
ui.Workspace.show_window(self.WINDOW_NAME, True)
property_window = ui.Workspace.get_window(self.WINDOW_NAME)
ui.WindowHandle.focus(property_window)
self.ignore_settings_update = False
def check_stage(self):
"""
It gets the current stage from the USD context
"""
if not hasattr(self, "stage") or not self.stage:
self._usd_context = omni.usd.get_context()
self.stage = self._usd_context.get_stage()
    def set_setting(self, value, attribute_name, create_only=False):
        """
        Store a boolean setting as a custom attribute on the stage's default
        prim: create the attribute when it does not exist, change it
        otherwise.

        :param value: True or False
        :param attribute_name: Name of the custom attribute on the default prim
        :param create_only: If True, the attribute will only be created if it doesn't exist,
        defaults to False (optional)
        :return: None. Returns early when there is no stage or no change is
        needed.
        """
        self.check_stage()
        if not self.stage:
            return
        # Get DefaultPrim from Stage
        default_prim = self.stage.GetDefaultPrim()
        # Get attribute from DefaultPrim if it exists
        attribute = default_prim.GetAttribute(attribute_name)
        attribute_path = attribute.GetPath()
        # check if attribute exists
        if not attribute:
            # if not, create it
            omni.kit.commands.execute(
                'CreateUsdAttributeOnPath',
                attr_path=attribute_path,
                attr_type=Sdf.ValueTypeNames.Bool,
                custom=True,
                attr_value=value,
                variability=Sdf.VariabilityVarying
            )
        else:
            # Skip the command when the value is already correct (or when the
            # caller only wanted the attribute created).
            if attribute.Get() == value or create_only:
                return
            omni.kit.commands.execute(
                'ChangeProperty',
                prop_path=attribute_path,
                value=value,
                # prev=not value is valid because the setting is boolean.
                prev=not value,
            )
def get_setting(self, attribute_name, default_value=True):
"""
It gets the value of an attribute from the default prim of the stage
:param attribute_name: The name of the attribute you want to get
:param default_value: The value to return if the attribute doesn't exist, defaults to True (optional)
:return: The value of the attribute.
"""
self.check_stage()
if not self.stage:
return
# Get DefaultPrim from Stage
default_prim = self.stage.GetDefaultPrim()
# Get attribute from DefaultPrim called
attribute = default_prim.GetAttribute(attribute_name)
if attribute:
return attribute.Get()
else:
return default_value # Attribute was not created yet, so we return default_value
    def render_active_objects_frame(self, valid_objects=None):
        """
        Render the list of scene objects that contain material variants, one
        row per object with a Select button.

        :param valid_objects: Pre-computed list of objects with variants;
        when omitted the stage is scanned.
        :return: The cached/new active_objects_frame (ui.Frame)
        """
        if not valid_objects:
            valid_objects = self.get_mme_valid_objects_on_stage()
        objects_quantity = len(valid_objects)
        # Switch to a two-column grid when the list gets long.
        objects_column_count = 1
        if objects_quantity > 6:
            objects_column_count = 2
        # The frame is cached on self so later calls only refresh its content.
        if not self.active_objects_frame:
            self.active_objects_frame = ui.Frame(name="active_objects_frame", identifier="active_objects_frame")
        with self.active_objects_frame:
            with ui.VGrid(column_count=objects_column_count):
                material_counter = 1
                # loop through all meshes
                for prim in valid_objects:
                    if not prim:
                        continue
                    with ui.HStack():
                        if objects_column_count == 1:
                            ui.Spacer(height=10, width=10)
                        ui.Label(
                            f"{material_counter}.",
                            name="material_counter",
                            width=20 if objects_column_count == 1 else 50,
                        )
                        if objects_column_count == 1:
                            ui.Spacer(height=10, width=10)
                        ui.Label(
                            prim.GetName(),
                            elided_text=True,
                            name="material_name"
                        )
                        ui.Button(
                            "Select",
                            name="variant_button",
                            width=ui.Percent(30),
                            # Default argument binds the path now, avoiding the
                            # late-binding closure pitfall inside the loop.
                            clicked_fn=lambda mesh_path=prim.GetPath(): self.select_prim(mesh_path),
                        )
                        material_counter += 1
                if objects_quantity == 0:
                    ui.Label(
                        "No models with variants were found.",
                        name="main_hint",
                        height=30
                    )
        ui.Spacer(height=10)
        return self.active_objects_frame
def render_scene_settings_layout(self, dock_in=False):
"""
It renders a window with a list of objects in the scene that have variants and some settings.
Called only once, all interactive elements are updated through the frames.
"""
valid_objects = self.get_mme_valid_objects_on_stage()
if self._window_scenemanager:
self._window_scenemanager.destroy()
self._window_scenemanager = None
self._window_scenemanager = ui.Window(self.SCENE_SETTINGS_WINDOW_NAME, width=300, height=300)
if dock_in:
self._window_scenemanager.deferred_dock_in(self.WINDOW_NAME)
if self.active_objects_frame:
self.active_objects_frame = None
with self._window_scenemanager.frame:
with ui.VStack(style=_style):
with ui.HStack(height=ui.Pixel(10), name="label_container"):
ui.Spacer(width=10)
ui.Label(self.SCENE_SETTINGS_WINDOW_NAME, name="main_label", height=ui.Pixel(10))
ui.Spacer(height=6)
ui.Separator(height=6)
ui.Spacer(height=10)
with ui.HStack(height=ui.Pixel(30)):
ui.Spacer(width=10)
ui.Label("Models with variants in your scene", name="secondary_label")
ui.Spacer(height=40)
with ui.ScrollingFrame(height=ui.Pixel(100)):
self.render_active_objects_frame(valid_objects)
ui.Spacer(height=10)
with ui.HStack(height=ui.Pixel(30)):
ui.Spacer(width=10)
ui.Label("Settings", name="secondary_label")
ui.Spacer(height=10)
ui.Separator(height=6)
with ui.ScrollingFrame():
with ui.VStack():
ui.Spacer(height=5)
with ui.HStack(height=20):
ui.Spacer(width=ui.Percent(5))
ui.Label("Enable viewport widget rendering:", width=ui.Percent(70))
ui.Spacer(width=ui.Percent(10))
# Creating a checkbox and setting the value to the value of the get_setting()
# function.
self.enable_viewport_ui = ui.CheckBox(width=ui.Percent(15))
self.enable_viewport_ui.model.set_value(self.get_setting("MMEEnableViewportUI"))
self.enable_viewport_ui.model.add_value_changed_fn(
lambda value: self.set_setting(value.get_value_as_bool(), "MMEEnableViewportUI")
)
ui.Spacer(height=10)
ui.Separator(height=6)
with ui.HStack(height=20):
# Window will appear if you look at the object in the viewport, instead of clicking on it
ui.Spacer(width=ui.Percent(5))
ui.Label("Roaming mode:", width=ui.Percent(70))
ui.Spacer(width=ui.Percent(10))
self.enable_roaming_mode = ui.CheckBox(width=ui.Percent(15))
self.enable_roaming_mode.model.set_value(self.get_setting("MMEEnableRoamingMode", False))
self.enable_roaming_mode.model.add_value_changed_fn(
lambda value: self.set_setting(value.get_value_as_bool(), "MMEEnableRoamingMode")
)
ui.Spacer(height=10)
ui.Separator(height=6)
| 61,602 | Python | 44.329654 | 168 | 0.542271 |
Vadim-Karpenko/omniverse-material-manager-extended/exts/karpenko.materialsmanager.ext/karpenko/materialsmanager/ext/__init__.py | from .extension import *
| 25 | Python | 11.999994 | 24 | 0.76 |
Vadim-Karpenko/omniverse-material-manager-extended/exts/karpenko.materialsmanager.ext/karpenko/materialsmanager/ext/viewport_ui/widget_info_manipulator.py | # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["WidgetInfoManipulator"]
from omni.ui import color as cl
from omni.ui import scene as sc
import omni.ui as ui
from ..style import viewport_widget_style
class _ViewportLegacyDisableSelection:
"""Disables selection in the Viewport Legacy"""
def __init__(self):
self._focused_windows = None
focused_windows = []
try:
# For some reason is_focused may return False, when a Window is definitely in fact is the focused window!
# And there's no good solution to this when mutliple Viewport-1 instances are open; so we just have to
# operate on all Viewports for a given usd_context.
import omni.kit.viewport_legacy as vp
vpi = vp.acquire_viewport_interface()
for instance in vpi.get_instance_list():
window = vpi.get_viewport_window(instance)
if not window:
continue
focused_windows.append(window)
if focused_windows:
self._focused_windows = focused_windows
for window in self._focused_windows:
# Disable the selection_rect, but enable_picking for snapping
window.disable_selection_rect(True)
except Exception:
pass
class _DragPrioritize(sc.GestureManager):
    """Gesture manager that refuses to let other gestures prevent _DragGesture."""
    def can_be_prevented(self, gesture):
        # Never prevent in the middle of drag
        return gesture.state != sc.GestureState.CHANGED

    def should_prevent(self, gesture, preventer):
        # Prevent other gestures while the preventer is actively dragging.
        # Returns an explicit bool instead of the previous implicit None on
        # the fall-through path (both are falsy, so behavior is unchanged).
        return preventer.state in (sc.GestureState.BEGAN, sc.GestureState.CHANGED)
class _DragGesture(sc.DragGesture):
    """Drag gesture that disables rectangle selection in the viewport legacy."""
    def __init__(self):
        super().__init__(manager=_DragPrioritize())

    def on_began(self):
        # While the user drags the slider the selection rect must not show.
        # Viewport Next handles this automatically, because there the rect is
        # a manipulator living in the same SceneView as the slider; Viewport
        # Legacy is not manipulator-based and needs an explicit disable.
        self.__disable_selection = _ViewportLegacyDisableSelection()

    def on_ended(self):
        # Dropping the reference re-enables selection in the Viewport Legacy.
        self.__disable_selection = None
class WidgetInfoManipulator(sc.Manipulator):
    """
    Viewport manipulator drawing a floating widget above the selected model
    that lets the user cycle through material variants via Prev/Next buttons
    or an integer slider (0 = original look, 1..N = variants).
    """
    def __init__(self, all_variants, enable_variant, looks, parent_prim, check_visibility, **kwargs):
        """
        :param all_variants: Variant prims exposed by the widget
        :param enable_variant: Callback that activates a variant by name
        :param looks: The "Looks" prim of the model
        :param parent_prim: The prim that owns the variants
        :param check_visibility: Callable returning whether the widget is enabled
        """
        super().__init__(**kwargs)
        # destroy() doubles as attribute initialization, so run it first.
        self.destroy()
        self.all_variants = all_variants
        self.enable_variant = enable_variant
        self.looks = looks
        self.parent_prim = parent_prim
        self.check_visibility = check_visibility
        self._radius = 2
        self._distance_to_top = 5
        self._thickness = 2
        self._radius_hovered = 20
        self.prev_button = None
        self.next_button = None

    def destroy(self):
        """Reset every reference held by the manipulator."""
        self._root = None
        self._slider_subscription = None
        self._slider_model = None
        self._name_label = None
        self.prev_button = None
        self.next_button = None
        self.all_variants = None
        self.enable_variant = None
        self.looks = None
        self.parent_prim = None

    def _on_build_widgets(self):
        """Build the widget UI: name label, Prev/Next buttons and the slider."""
        with ui.ZStack(height=70, style=viewport_widget_style):
            ui.Rectangle(
                style={
                    "background_color": cl(0.2),
                    "border_color": cl(0.7),
                    "border_width": 2,
                    "border_radius": 4,
                }
            )
            with ui.VStack():
                ui.Spacer(height=4)
                with ui.HStack():
                    ui.Spacer(width=10)
                    self.prev_button = ui.Button("Prev", width=100)
                    self._name_label = ui.Label(
                        "",
                        elided_text=True,
                        name="name_label",
                        height=0,
                        alignment=ui.Alignment.CENTER_BOTTOM
                    )
                    self.next_button = ui.Button("Next", width=100)
                    ui.Spacer(width=10)
                # setup some model, just for simple demonstration here
                self._slider_model = ui.SimpleIntModel()
                ui.Spacer(height=5)
                with ui.HStack(style={"font_size": 26}):
                    ui.Spacer(width=5)
                    # Slider position 0 == original look, 1..N == variants.
                    ui.IntSlider(self._slider_model, min=0, max=len(self.all_variants))
                    ui.Spacer(width=5)
                ui.Spacer(height=24)
                ui.Spacer()
        self.on_model_updated(None)
        # Additional gesture that prevents Viewport Legacy selection
        self._widget.gestures += [_DragGesture()]

    def on_build(self):
        """Called when the model is changed and rebuilds the whole slider"""
        self._root = sc.Transform(visible=False)
        with self._root:
            with sc.Transform(scale_to=sc.Space.SCREEN):
                with sc.Transform(transform=sc.Matrix44.get_translation_matrix(0, 100, 0)):
                    # Label
                    with sc.Transform(look_at=sc.Transform.LookAt.CAMERA):
                        self._widget = sc.Widget(500, 130, update_policy=sc.Widget.UpdatePolicy.ON_MOUSE_HOVERED)
                        self._widget.frame.set_build_fn(self._on_build_widgets)

    # Update the slider
    def update_variant(self, value):
        """
        Activate the variant at slider position *value* (0 = original look).

        :param value: Index into the slider range, 0..len(all_variants)
        """
        if not self._root or not self._root.visible or not self.looks or not self.parent_prim:
            return
        if value == 0:
            self.enable_variant(None, self.looks, self.parent_prim, ignore_select=True)
        else:
            selected_variant = self.all_variants[value - 1]
            if not selected_variant:
                return
            prim_name = selected_variant.GetName()
            self.enable_variant(prim_name, self.looks, self.parent_prim, ignore_select=True)

    def on_model_updated(self, _):
        """Sync widget visibility, slider position, buttons and label with the model."""
        if not self._root:
            return
        # if we don't have selection then show nothing
        if not self.model or not self.model.get_item("name") or not self.check_visibility("MMEEnableViewportUI"):
            self._root.visible = False
            return
        # Update the shapes
        position = self.model.get_as_floats(self.model.get_item("position"))
        self._root.transform = sc.Matrix44.get_translation_matrix(*position)
        self._root.visible = True
        # Find which variant is currently active (0 == original look).
        active_index = 0
        for variant_prim in self.all_variants:
            is_active_attr = variant_prim.GetAttribute("MMEisActive")
            if is_active_attr:
                # Checking if the attribute is_active_attr is active.
                is_active = is_active_attr.Get()
                if is_active:
                    active_index = self.all_variants.index(variant_prim) + 1
                    break
        if self._slider_model:
            # Pause the subscription while moving the slider programmatically
            # so update_variant is not re-triggered by our own change.
            if self._slider_subscription:
                self._slider_subscription.unsubscribe()
                self._slider_subscription = None
            self._slider_model.as_int = active_index
            self._slider_subscription = self._slider_model.subscribe_value_changed_fn(
                lambda m: self.update_variant(m.as_int)
            )
        if self.prev_button and self.next_button:
            self.prev_button.enabled = active_index > 0
            self.next_button.enabled = active_index < len(self.all_variants)
            self.prev_button.set_clicked_fn(lambda: self.update_variant(active_index - 1))
            self.next_button.set_clicked_fn(lambda: self.update_variant(active_index + 1))
        # Update the shape name
        if self._name_label:
            if active_index == 0:
                # Bug fix: the label previously read "Orginal"; match the
                # "Original" wording used by the extension's main window.
                self._name_label.text = "Original"
            else:
                self._name_label.text = f"{self.all_variants[active_index - 1].GetName()}"
| 8,631 | Python | 39.336448 | 117 | 0.588228 |
Vadim-Karpenko/omniverse-material-manager-extended/exts/karpenko.materialsmanager.ext/karpenko/materialsmanager/ext/viewport_ui/widget_info_scene.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["WidgetInfoScene"]
from omni.ui import scene as sc
from .widget_info_model import WidgetInfoModel
from .widget_info_manipulator import WidgetInfoManipulator
class WidgetInfoScene():
    """Owns a SceneView placed in a viewport frame and hosts the widget-info manipulator."""
    def __init__(self,
                 viewport_window,
                 ext_id: str,
                 all_variants: list,
                 enable_variant,
                 looks,
                 parent_prim,
                 check_visibility):
        """Create a SceneView under the frame keyed by *ext_id* and populate it
        with a WidgetInfoManipulator bound to *parent_prim*."""
        self._scene_view = None
        self._viewport_window = viewport_window
        # The extension id keys a dedicated frame so this scene does not clash
        # with overlays registered by other extensions.
        with self._viewport_window.get_frame(ext_id):
            # A default SceneView ships with its own camera model.
            self._scene_view = sc.SceneView()
            # Everything constructed inside this context becomes part of the scene.
            with self._scene_view.scene:
                manipulator = WidgetInfoManipulator(
                    model=WidgetInfoModel(parent_prim=parent_prim, get_setting=check_visibility),
                    all_variants=all_variants,
                    enable_variant=enable_variant,
                    looks=looks,
                    parent_prim=parent_prim,
                    check_visibility=check_visibility,
                )
                self.info_manipulator = manipulator
        # Attach the SceneView to the viewport so it receives projection/view updates.
        self._viewport_window.viewport_api.add_scene_view(self._scene_view)
    def __del__(self):
        # Garbage collection triggers the same teardown as an explicit destroy().
        self.destroy()
    def destroy(self):
        """Tear down the manipulator and detach/clear the SceneView; safe to call twice."""
        if self.info_manipulator:
            self.info_manipulator.destroy()
        if self._scene_view:
            # Drop all scene items first...
            self._scene_view.scene.clear()
            # ...then un-register from the viewport so updates stop arriving.
            if self._viewport_window:
                self._viewport_window.viewport_api.remove_scene_view(self._scene_view)
        # Clear the references so a repeated call (e.g. from __del__) is a no-op.
        self._viewport_window = None
        self._scene_view = None
        self.info_manipulator = None
| 2,570 | Python | 38.553846 | 97 | 0.624125 |
Vadim-Karpenko/omniverse-material-manager-extended/exts/karpenko.materialsmanager.ext/karpenko/materialsmanager/ext/viewport_ui/widget_info_model.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["WidgetInfoModel"]
from omni.ui import scene as sc
from pxr import UsdGeom
from pxr import Usd
from pxr import UsdShade
from pxr import Tf
from pxr import UsdLux
import omni.usd
import omni.kit.commands
class WidgetInfoModel(sc.AbstractManipulatorModel):
    """
    User part. The model tracks the position and info of the selected object.
    """
    class PositionItem(sc.AbstractManipulatorItem):
        """
        The Model Item represents the position. It doesn't hold any state
        because the position is read directly from USD when requested.
        """
        def __init__(self):
            super().__init__()
            self.value = [0, 0, 0]
    class ValueItem(sc.AbstractManipulatorItem):
        """The Model Item contains a single float value about some attribute."""
        def __init__(self, value=0):
            super().__init__()
            self.value = [value]
    def __init__(self, parent_prim, get_setting):
        """
        :param parent_prim: the Usd.Prim this widget tracks
        :param get_setting: callable(name, default) that reads extension settings
        """
        super().__init__()
        self.material_name = ""
        self.position = WidgetInfoModel.PositionItem()
        # The distance from the bounding box to the position the model returns
        self._offset = 0
        # Current selection
        self._prim = parent_prim
        self.get_setting = get_setting
        self._current_path = ""
        self._stage_listener = None
        # Save the UsdContext name (we currently only work with a single Context)
        self._usd_context_name = ''
        usd_context = self._get_context()
        # Track selection via the stage event stream
        self._events = usd_context.get_stage_event_stream()
        self._stage_event_sub = self._events.create_subscription_to_pop(
            self._on_stage_event, name="Object Info Selection Update"
        )
    def _get_context(self):
        # Get the UsdContext we are attached to.
        # NOTE: the previous "-> Usd.Stage" annotation was wrong — this returns
        # an omni.usd.UsdContext, not a stage.
        return omni.usd.get_context(self._usd_context_name)
    def _notice_changed(self, notice, stage):
        """Called by Tf.Notice when stage objects change; re-publishes the position."""
        if self.get_setting("MMEEnableRoamingMode", False):
            # In roaming mode every change refreshes the position.
            self._item_changed(self.position)
            return
        for p in notice.GetChangedInfoOnlyPaths():
            # Only react to changes on (or under) the tracked prim path.
            if self._current_path in str(p.GetPrimPath()):
                self._item_changed(self.position)
    def get_item(self, identifier):
        """Return the model item (or string info) matching *identifier*."""
        if identifier == "position":
            return self.position
        if identifier == "name":
            return self._current_path
        if identifier == "material":
            return self.material_name
    def get_as_floats(self, item):
        """Return *item*'s value as a list of floats; the position is computed from USD."""
        if item == self.position:
            # Requesting position
            return self._get_position()
        if item:
            # Get the value directly from the item
            return item.value
        return []
    def set_floats(self, item, value):
        """Store *value* on *item* and notify the manipulator, ignoring no-op writes."""
        if not self._current_path:
            return
        if not value or not item or item.value == value:
            return
        # Set directly to the item
        item.value = value
        # This makes the manipulator updated
        self._item_changed(item)
    def _on_stage_event(self, event):
        """Called by stage_event_stream"""
        if event.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
            self._on_kit_selection_changed()
    def _on_kit_selection_changed(self):
        # Selection changed: reset the tracked path until it is re-validated below.
        self._current_path = ""
        usd_context = self._get_context()
        stage = usd_context.get_stage()
        if not stage:
            return
        if not self.get_setting("MMEEnableRoamingMode", False):
            prim_paths = usd_context.get_selection().get_selected_prim_paths()
            # Only track when exactly one prim is selected and it belongs to our prim's
            # subtree. (The former redundant `len(prim_paths) == 0` check is covered by
            # `not prim_paths`.)
            if not prim_paths or len(prim_paths) > 1 or str(self._prim.GetPath()) not in prim_paths[0]:
                self._item_changed(self.position)
                # Revoke the Tf.Notice listener, we don't need to update anything
                if self._stage_listener:
                    self._stage_listener.Revoke()
                    self._stage_listener = None
                return
        prim = self._prim
        if prim.GetTypeName() == "Light":
            self.material_name = "I am a Light"
        elif prim.IsA(UsdGeom.Imageable):
            material, relationship = UsdShade.MaterialBindingAPI(prim).ComputeBoundMaterial()
            if material:
                self.material_name = str(material.GetPath())
            else:
                self.material_name = "N/A"
        else:
            # Not something we can display info for — stop tracking it.
            self._prim = None
            return
        self._current_path = str(self._prim.GetPath())
        # Add a Tf.Notice listener to update the position
        if not self._stage_listener:
            self._stage_listener = Tf.Notice.Register(Usd.Notice.ObjectsChanged, self._notice_changed, stage)
        # Position is changed (the unused get_local_transform_SRT query was removed)
        self._item_changed(self.position)
    def find_child_mesh_with_position(self, prim):
        """
        Recursively find a descendant Mesh whose bounding-box position looks valid.
        Side effect: updates self._current_path while probing candidates.
        """
        if prim.IsA(UsdGeom.Mesh):
            self._current_path = str(prim.GetPath())
            prim_position = self._get_position(non_recursive=True)
            # Heuristic: a zero on any axis is treated as "no usable bbox" — keep searching.
            if prim_position[0] != 0.0 and prim_position[1] != 0.0 and prim_position[2] != 0.0:
                return prim
        for child in prim.GetChildren():
            result = self.find_child_mesh_with_position(child)
            if result:
                return result
        return None
    def _get_position(self, non_recursive=False):
        """Returns the position of the currently selected object.

        The point is the bbox center in X/Z and the bbox top (plus self._offset) in Y.
        """
        stage = self._get_context().get_stage()
        if not stage or not self._current_path:
            return [0, 0, 0]
        # Get position directly from USD
        if non_recursive:
            prim = stage.GetPrimAtPath(self._current_path)
        else:
            prim = self.find_child_mesh_with_position(stage.GetPrimAtPath(self._current_path))
        # Guard: the recursive search may legitimately find no suitable mesh.
        if not prim:
            return [0, 0, 0]
        box_cache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), includedPurposes=[UsdGeom.Tokens.default_])
        bound = box_cache.ComputeWorldBound(prim)
        # Renamed from "range" to avoid shadowing the builtin.
        aligned_box = bound.ComputeAlignedBox()
        bboxMin = aligned_box.GetMin()
        bboxMax = aligned_box.GetMax()
        position = [(bboxMin[0] + bboxMax[0]) * 0.5, bboxMax[1] + self._offset, (bboxMin[2] + bboxMax[2]) * 0.5]
        return position
| 6,966 | Python | 34.728205 | 127 | 0.604795 |
zslrmhb/Omniverse-Virtual-Assisstant/nlp.py | # natural language processing module utilizing the Riva SDK
import wikipedia as wiki
import wikipediaapi as wiki_api
import riva.client
from config import URI
class NLPService:
    """Question answering over Wikipedia content via the Riva NLP service."""
    def __init__(self, max_wiki_articles):
        """
        :param max_wiki_articles: maximum number of Wikipedia articles to pull summaries from
        """
        self.max_wiki_articles = max_wiki_articles
        self.wiki_summary = " "
        self.input_query = " "
        self.auth = riva.client.Auth(uri=URI)
        self.service = riva.client.NLPService(self.auth)
        self.wiki_wiki = wiki_api.Wikipedia('en')
    def wiki_query(self, input_query) -> None:
        """
        Search Wikipedia for *input_query* and accumulate article summaries as QA context.

        :param input_query: text to search for
        :return: None; results are stored in self.wiki_summary and self.input_query
        """
        self.wiki_summary = " "
        self.input_query = input_query
        wiki_articles = wiki.search(input_query)
        # Slicing already caps the length, so the former explicit min() was redundant.
        for article in wiki_articles[:self.max_wiki_articles]:
            print(f"Getting summary for: {article}")
            page = self.wiki_wiki.page(article)
            self.wiki_summary += "\n" + page.summary
    def nlp_query(self) -> str:
        """
        Run the query collected by wiki_query() against the NLP model.

        :return: the model's answer, or a fallback prompt when no answer was found
                 (the previous "-> None" annotation was wrong — this returns str)
        """
        resp = self.service.natural_query(self.input_query, self.wiki_summary)
        answer = resp.results[0].answer
        if not answer:
            return "Sorry, I don't understand, may you speak again?"
        return answer
| 1,428 | Python | 32.232557 | 87 | 0.60084 |
zslrmhb/Omniverse-Virtual-Assisstant/tts.py | # text-to-speech module utilizting the Riva SDK
import riva.client
import riva.client.audio_io
from config import URI
class TTSService:
    """Text-to-speech playback and synthesis backed by the Riva TTS service."""
    def __init__(self, language='en-US', sample_rate_hz=44100):
        """
        :param language: language code used for synthesis
        :param sample_rate_hz: output sample rate in hertz
        """
        self.auth = riva.client.Auth(uri=URI)
        self.service = riva.client.SpeechSynthesisService(self.auth)
        # NOTE(review): the attribute keeps the original misspelling ("langauge")
        # for backward compatibility with external code that may read it.
        self.langauge = language
        self.voice = "English-US.Male-1"
        self.sample_rate_hz = sample_rate_hz
    def speak(self, text) -> None:
        """
        Synthesize *text* and play it through the sound device, streaming chunks
        as they arrive from the server.

        :param text: text to speak
        :return: None
        """
        sound_stream = riva.client.audio_io.SoundCallBack(
            3, nchannels=1, sampwidth=2, framerate=self.sample_rate_hz
        )
        try:
            # NOTE(review): voice=None lets the server pick a default voice, while
            # get_audio_bytes() passes self.voice — confirm which is intended.
            responses = self.service.synthesize_online(
                text, None, self.langauge, sample_rate_hz=self.sample_rate_hz
            )
            for resp in responses:
                sound_stream(resp.audio)
        finally:
            # Always release the audio device, even if synthesis fails mid-stream
            # (previously the stream leaked on any exception).
            sound_stream.close()
    def get_audio_bytes(self, text) -> bytes:
        """
        Synthesize *text* in one request and return the raw audio payload.

        :param text: text to speak
        :return: synthesized speech audio bytes
        """
        resp = self.service.synthesize(text, self.voice, self.langauge, sample_rate_hz=self.sample_rate_hz)
        return resp.audio
| 1,310 | Python | 28.795454 | 107 | 0.603817 |
zslrmhb/Omniverse-Virtual-Assisstant/audio2face_streaming_utils.py | """
This demo script shows how to send audio data to Audio2Face Streaming Audio Player via gRPC requests.
There are two options:
* Send the whole track at once using PushAudioRequest()
* Send the audio chunks seuqntially in a stream using PushAudioStreamRequest()
For the second option this script emulates the stream of chunks, generated by splitting an input WAV audio file.
But in a real application such stream of chunks may be aquired from some other streaming source:
* streaming audio via internet, streaming Text-To-Speech, etc
gRPC protocol details could be find in audio2face.proto
"""
import sys
import grpc
import time
import numpy as np
import soundfile
import audio2face_pb2
import audio2face_pb2_grpc
def push_audio_track(url, audio_data, samplerate, instance_name):
    """
    Send a complete audio track to Audio2Face in a single PushAudio() gRPC call.

    Fields packed into the PushAudioRequest:
    * audio_data: float32 samples for the whole track, serialized to bytes
    * samplerate: sampling rate of the audio data
    * instance_name: prim path of the Streaming Audio Player on the stage
    * block_until_playback_is_finished: when True, the call blocks until playback ends
    """
    block_until_playback_is_finished = True  # ADJUST
    with grpc.insecure_channel(url) as channel:
        stub = audio2face_pb2_grpc.Audio2FaceStub(channel)
        msg = audio2face_pb2.PushAudioRequest()
        msg.audio_data = audio_data.astype(np.float32).tobytes()
        msg.samplerate = samplerate
        msg.instance_name = instance_name
        msg.block_until_playback_is_finished = block_until_playback_is_finished
        print("Sending audio data...")
        reply = stub.PushAudio(msg)
        if reply.success:
            print("SUCCESS")
        else:
            print(f"ERROR: {reply.message}")
    print("Closed channel")
def push_audio_track_stream(url, audio_data, samplerate, instance_name):
    """
    Push audio to Audio2Face as a sequence of chunks via PushAudioStream().

    This function emulates a chunked stream by splitting the input track; in a real
    application the chunks may come from any streaming source (internet audio,
    streaming Text-To-Speech, etc).

    Message protocol (see audio2face.proto):
    * The first message must carry only start_marker with the meta information:
      - samplerate: sampling rate for the audio data
      - instance_name: prim path of the Streaming Audio Player on the stage
      - block_until_playback_is_finished: when True, the gRPC request blocks until
        playback of the pushed track is finished (after the last message)
    * Every following message carries audio_data: one chunk of float32 samples
      encoded as bytes (4 bytes per sample).
    """
    chunk_size = samplerate // 10  # ADJUST
    sleep_between_chunks = 0.04  # ADJUST
    block_until_playback_is_finished = True  # ADJUST
    with grpc.insecure_channel(url) as channel:
        print("Channel created")  # fixed typo in the log message ("creadted")
        stub = audio2face_pb2_grpc.Audio2FaceStub(channel)
        def make_generator():
            start_marker = audio2face_pb2.PushAudioRequestStart(
                samplerate=samplerate,
                instance_name=instance_name,
                block_until_playback_is_finished=block_until_playback_is_finished,
            )
            # At first, we send a message with start_marker
            yield audio2face_pb2.PushAudioStreamRequest(start_marker=start_marker)
            # Then we send messages with audio_data. Stepping by chunk_size fixes the
            # off-by-one of the original `len // chunk_size + 1` loop, which emitted an
            # empty trailing chunk whenever the track length was a multiple of chunk_size.
            for start in range(0, len(audio_data), chunk_size):
                time.sleep(sleep_between_chunks)
                chunk = audio_data[start : start + chunk_size]
                yield audio2face_pb2.PushAudioStreamRequest(audio_data=chunk.astype(np.float32).tobytes())
        request_generator = make_generator()
        print("Sending audio data...")
        response = stub.PushAudioStream(request_generator)
        if response.success:
            print("SUCCESS")
        else:
            print(f"ERROR: {response.message}")
    print("Channel closed")
def main():
    """
    Command-line entry point: load a WAV file and push it to the Audio2Face
    Streaming Audio Player, either as one whole track or as an emulated stream
    of chunks (toggle below).
    """
    if len(sys.argv) < 3:
        print("Format: python test_client.py PATH_TO_WAV INSTANCE_NAME")
        return
    # Sleep time emulates long latency of the request
    sleep_time = 2.0  # ADJUST
    # URL of the Audio2Face Streaming Audio Player server (where A2F App is running)
    url = "localhost:50051"  # ADJUST
    audio_fpath = sys.argv[1]  # local input WAV file path
    instance_name = sys.argv[2]  # prim path of the Streaming Audio Player on the stage
    samples, samplerate = soundfile.read(audio_fpath, dtype="float32")
    # Only mono audio is supported: average the channels down when needed.
    if len(samples.shape) > 1:
        samples = np.average(samples, axis=1)
    print(f"Sleeping for {sleep_time} seconds")
    time.sleep(sleep_time)
    if 0:  # ADJUST: flip to 1 to push the whole track in a single request
        push_audio_track(url, samples, samplerate, instance_name)
    else:
        # Emulate an audio stream and push the chunks sequentially
        push_audio_track_stream(url, samples, samplerate, instance_name)


if __name__ == "__main__":
    main()
| 6,202 | Python | 42.377622 | 158 | 0.697356 |
zslrmhb/Omniverse-Virtual-Assisstant/asr.py | # Audio to Speech Module utilizing the Riva SDK
import riva.client
import riva.client.audio_io
from typing import Iterable
import riva.client.proto.riva_asr_pb2 as rasr
from config import URI
# Streaming recognition settings shared by every ASRService instance.
config = riva.client.StreamingRecognitionConfig(
    config=riva.client.RecognitionConfig(
        encoding=riva.client.AudioEncoding.LINEAR_PCM,  # raw 16-bit PCM input
        language_code='en-US',
        max_alternatives=1,  # only the best hypothesis is needed
        profanity_filter=False,
        enable_automatic_punctuation=True,
        verbatim_transcripts=True,
        sample_rate_hertz=16000  # must match ASRService.sample_rate_hz below
    ),
    interim_results=False,  # only final transcripts are delivered
)
class ASRService:
    """Streaming speech-to-text from the microphone via the Riva ASR service."""
    def __init__(self):
        """Connect to the Riva server and look up the default input device."""
        self.auth = riva.client.Auth(uri=URI)
        self.service = riva.client.ASRService(self.auth)
        self.sample_rate_hz = 16000  # must match config.sample_rate_hertz above
        self.file_streaming_chunk = 1600  # 100 ms of audio per chunk at 16 kHz
        self.transcript = ""
        self.default_device_info = riva.client.audio_io.get_default_input_device_info()
        self.default_device_index = None if self.default_device_info is None else self.default_device_info['index']
    def run(self) -> None:
        """
        Record from the microphone and accumulate the final transcript in
        self.transcript; blocks until the stream ends.

        :return: None
        """
        with riva.client.audio_io.MicrophoneStream(
            rate=self.sample_rate_hz,
            chunk=self.file_streaming_chunk,
            # Use the detected default input device instead of the previously
            # hard-coded index 1, which was machine-specific; the computed
            # default_device_index was otherwise unused.
            device=self.default_device_index,
        ) as audio_chunk_iterator:
            self.print_response(responses=self.service.streaming_response_generator(
                audio_chunks=audio_chunk_iterator,
                streaming_config=config))
    def print_response(self, responses: Iterable[rasr.StreamingRecognizeResponse]) -> None:
        """
        Consume streaming responses, concatenating every final transcript
        segment into self.transcript.

        :param responses: streaming recognition responses
        :return: None
        """
        self.transcript = ""
        for response in responses:
            if not response.results:
                continue
            for result in response.results:
                if not result.alternatives:
                    continue
                if result.is_final:
                    self.transcript += result.alternatives[0].transcript
| 2,796 | Python | 34.858974 | 115 | 0.545422 |
zslrmhb/Omniverse-Virtual-Assisstant/main.py | # Running the Demo
from asr import ASRService
from nlp import NLPService
from tts import TTSService
from audio2face import Audio2FaceService
# Wire the four services together: microphone -> ASR -> NLP -> TTS -> Audio2Face.
asr_service = ASRService()
nlp_service = NLPService(max_wiki_articles=5)
tts_service = TTSService()
audio2face_service = Audio2FaceService()

while True:
    # 1) Speech recognition: block until the microphone yields a final transcript.
    asr_service.run()
    print(asr_service.transcript)
    # 2) Natural language processing, grounded in Wikipedia article summaries.
    nlp_service.wiki_query(asr_service.transcript)
    output = nlp_service.nlp_query()
    print(output)
    # 3) Text-to-speech: synthesize the answer offline.
    speech_audio = tts_service.get_audio_bytes(output)
    # 4) Drive the Audio2Face avatar with the synthesized audio.
    audio2face_service.make_avatar_speaks(speech_audio)
| 721 | Python | 24.785713 | 64 | 0.736477 |
zslrmhb/Omniverse-Virtual-Assisstant/config.py | # configuration for accessing the remote local host on the Google Cloud Server
# Address of the Riva speech server that ASR/NLP/TTS services connect to.
URI = "" # This will be in the syntax of external ip of your Riva Server:Port of your Riva Server
# Example: 12.34.56.789:50050
| 218 | Python | 53.749987 | 98 | 0.724771 |
zslrmhb/Omniverse-Virtual-Assisstant/audio2face_pb2_grpc.py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import audio2face_pb2 as audio2face__pb2
# NOTE(review): protoc-generated client stub — do not hand-edit; regenerate from audio2face.proto.
class Audio2FaceStub(object):
    """Missing associated documentation comment in .proto file."""
    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # Unary-unary RPC: one PushAudioRequest in, one PushAudioResponse out.
        self.PushAudio = channel.unary_unary(
            "/nvidia.audio2face.Audio2Face/PushAudio",
            request_serializer=audio2face__pb2.PushAudioRequest.SerializeToString,
            response_deserializer=audio2face__pb2.PushAudioResponse.FromString,
        )
        # Stream-unary RPC: a stream of PushAudioStreamRequest in, one response out.
        self.PushAudioStream = channel.stream_unary(
            "/nvidia.audio2face.Audio2Face/PushAudioStream",
            request_serializer=audio2face__pb2.PushAudioStreamRequest.SerializeToString,
            response_deserializer=audio2face__pb2.PushAudioStreamResponse.FromString,
        )
# NOTE(review): protoc-generated service base class — do not hand-edit; regenerate from audio2face.proto.
class Audio2FaceServicer(object):
    """Missing associated documentation comment in .proto file."""
    def PushAudio(self, request, context):
        """Missing associated documentation comment in .proto file."""
        # Default implementation reports UNIMPLEMENTED until a subclass overrides it.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def PushAudioStream(self, request_iterator, context):
        """Missing associated documentation comment in .proto file."""
        # Default implementation reports UNIMPLEMENTED until a subclass overrides it.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
# NOTE(review): protoc-generated registration helper — do not hand-edit; regenerate from audio2face.proto.
def add_Audio2FaceServicer_to_server(servicer, server):
    # Map each RPC name to its handler plus request/response (de)serializers.
    rpc_method_handlers = {
        "PushAudio": grpc.unary_unary_rpc_method_handler(
            servicer.PushAudio,
            request_deserializer=audio2face__pb2.PushAudioRequest.FromString,
            response_serializer=audio2face__pb2.PushAudioResponse.SerializeToString,
        ),
        "PushAudioStream": grpc.stream_unary_rpc_method_handler(
            servicer.PushAudioStream,
            request_deserializer=audio2face__pb2.PushAudioStreamRequest.FromString,
            response_serializer=audio2face__pb2.PushAudioStreamResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler("nvidia.audio2face.Audio2Face", rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
# NOTE(review): protoc-generated convenience wrappers — do not hand-edit; regenerate from audio2face.proto.
class Audio2Face(object):
    """Missing associated documentation comment in .proto file."""
    @staticmethod
    def PushAudio(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        # One-shot unary-unary call without building a stub by hand.
        return grpc.experimental.unary_unary(
            request,
            target,
            "/nvidia.audio2face.Audio2Face/PushAudio",
            audio2face__pb2.PushAudioRequest.SerializeToString,
            audio2face__pb2.PushAudioResponse.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
    @staticmethod
    def PushAudioStream(
        request_iterator,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        # One-shot stream-unary call without building a stub by hand.
        return grpc.experimental.stream_unary(
            request_iterator,
            target,
            "/nvidia.audio2face.Audio2Face/PushAudioStream",
            audio2face__pb2.PushAudioStreamRequest.SerializeToString,
            audio2face__pb2.PushAudioStreamResponse.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
| 4,208 | Python | 33.219512 | 111 | 0.642586 |
zslrmhb/Omniverse-Virtual-Assisstant/audio2face.py | # speech to Audio2Face module utilizing the gRPC protocal from audio2face_streaming_utils
import riva.client
import io
from pydub import AudioSegment
from scipy.io.wavfile import read
import numpy as np
from audio2face_streaming_utils import push_audio_track
class Audio2FaceService:
    """Converts Riva TTS audio into float32 samples and streams them to the Audio2Face avatar."""
    def __init__(self, sample_rate=44100):
        """
        :param sample_rate: sample rate in hertz reported to Audio2Face
        """
        self.a2f_url = 'localhost:50051'  # Set it to the port of your local host
        # Bug fix: honor the constructor argument instead of hard-coding 44100.
        self.sample_rate = sample_rate
        self.avatar_instance = '/World/audio2face/PlayerStreaming'  # Set it to the name of your Audio2Face Streaming Instance
    def tts_to_wav(self, tts_byte, framerate=22050):
        """
        Wrap raw TTS PCM bytes in a WAV container and decode the samples.
        (The previous "-> str" annotation was wrong — this returns an array of samples.)

        :param tts_byte: raw TTS audio as bytes (16-bit mono PCM)
        :param framerate: frame rate of the raw audio in hertz
        :return: numpy array of int16 WAV samples
        """
        # Bug fix: pass the framerate argument through instead of hard-coding 22050.
        seg = AudioSegment.from_raw(io.BytesIO(tts_byte), sample_width=2, frame_rate=framerate, channels=1)
        wavIO = io.BytesIO()
        seg.export(wavIO, format="wav")
        rate, wav = read(io.BytesIO(wavIO.getvalue()))
        return wav
    def wav_to_numpy_float32(self, wav_byte):
        """
        Normalize int16 WAV samples to float32 in [-1, 1).
        (The previous "-> float" annotation was wrong — this returns an ndarray.)

        :param wav_byte: numpy array of int16 samples
        :return: float32 numpy array
        """
        return wav_byte.astype(np.float32, order='C') / 32768.0
    def get_tts_numpy_audio(self, audio):
        """
        :param audio: raw TTS audio bytes
        :return: float32 numpy array ready for streaming
        """
        wav_byte = self.tts_to_wav(audio)
        return self.wav_to_numpy_float32(wav_byte)
    def make_avatar_speaks(self, audio) -> None:
        """
        Push TTS audio to the Audio2Face streaming player so the avatar lip-syncs it.

        :param audio: raw TTS audio bytes
        :return: None
        """
        push_audio_track(self.a2f_url, self.get_tts_numpy_audio(audio), self.sample_rate, self.avatar_instance)
| 1,769 | Python | 32.396226 | 127 | 0.62182 |
PegasusSimulator/PegasusSimulator/examples/1_px4_single_vehicle.py | #!/usr/bin/env python
"""
| File: 1_px4_single_vehicle.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: This files serves as an example on how to build an app that makes use of the Pegasus API to run a simulation with a single vehicle, controlled using the MAVLink control backend.
"""
# Imports to start Isaac Sim from this script
import carb
from omni.isaac.kit import SimulationApp
# Start Isaac Sim's simulation environment
# Note: this simulation app must be instantiated right after the SimulationApp import, otherwise the simulator will crash
# as this is the object that will load all the extensions and load the actual simulator.
simulation_app = SimulationApp({"headless": False})
# -----------------------------------
# The actual script should start here
# -----------------------------------
import omni.timeline
from omni.isaac.core.world import World
# Import the Pegasus API for simulating drones
from pegasus.simulator.params import ROBOTS, SIMULATION_ENVIRONMENTS
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.backends.mavlink_backend import MavlinkBackend, MavlinkBackendConfig
from pegasus.simulator.logic.vehicles.multirotor import Multirotor, MultirotorConfig
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
# Auxiliary scipy and numpy modules
import os.path
from scipy.spatial.transform import Rotation
class PegasusApp:
    """
    Minimal standalone Isaac Sim application: spawns one PX4-controlled
    multirotor in a stock NVIDIA environment and drives the physics loop.
    """
    def __init__(self):
        """Set up the simulation world, environment and a single MAVLink-backed vehicle."""
        # Timeline handle used to start/stop the simulation from code.
        self.timeline = omni.timeline.get_timeline_interface()
        # Pegasus singleton: one-stop shop for physics setup, asset spawning, etc.
        self.pg = PegasusInterface()
        self.pg._world = World(**self.pg._world_settings)
        self.world = self.pg.world
        # One of the stock NVIDIA environments.
        self.pg.load_environment(SIMULATION_ENVIRONMENTS["Curved Gridroom"])
        # Configure a multirotor whose control backend talks MAVLink to an
        # auto-launched PX4 instance.
        px4_cfg = MavlinkBackendConfig({
            "vehicle_id": 0,
            "px4_autolaunch": True,
            "px4_dir": self.pg.px4_path,
            "px4_vehicle_model": self.pg.px4_default_airframe # CHANGE this line to 'iris' if using PX4 version bellow v1.14
        })
        vehicle_cfg = MultirotorConfig()
        vehicle_cfg.backends = [MavlinkBackend(px4_cfg)]
        Multirotor(
            "/World/quadrotor",
            ROBOTS['Iris'],
            0,
            [0.0, 0.0, 0.07],
            Rotation.from_euler("XYZ", [0.0, 0.0, 0.0], degrees=True).as_quat(),
            config=vehicle_cfg,
        )
        # Initialize every articulation (aka robot) now present on the stage.
        self.world.reset()
        # Flag that external code may flip to break out of run().
        self.stop_sim = False
    def run(self):
        """Run the main loop: one physics + render step per iteration until shutdown."""
        self.timeline.play()
        # Step until the app closes or someone requests a stop.
        while simulation_app.is_running() and not self.stop_sim:
            self.world.step(render=True)
        # Cleanup and stop
        carb.log_warn("PegasusApp Simulation App is closing.")
        self.timeline.stop()
        simulation_app.close()
def main():
    """Build the PegasusApp and hand control to its main loop."""
    PegasusApp().run()


if __name__ == "__main__":
    main()
| 4,153 | Python | 35.438596 | 192 | 0.667229 |
PegasusSimulator/PegasusSimulator/examples/6_paper_results.py | #!/usr/bin/env python
"""
| File: python_control_backend.py
| Author: Marcelo Jacinto and Joao Pinto ([email protected], [email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: This files serves as an example on how to use the control backends API to create a custom controller
for the vehicle from scratch and use it to perform a simulation, without using PX4 nor ROS. NOTE: to see the HDR
environment as shown in the video and paper, you must have opened ISAAC SIM at least once thorugh the OMNIVERSE APP,
otherwise, the path to the HDR environment is not recognized.
"""
# Imports to start Isaac Sim from this script
import carb
from omni.isaac.kit import SimulationApp
# Start Isaac Sim's simulation environment
# Note: this simulation app must be instantiated right after the SimulationApp import, otherwise the simulator will crash
# as this is the object that will load all the extensions and load the actual simulator.
simulation_app = SimulationApp({"headless": False})
# -----------------------------------
# The actual script should start here
# -----------------------------------
import omni.timeline
from omni.isaac.core.world import World
# Used for adding extra lights to the environment
import omni.isaac.core.utils.prims as prim_utils
import omni.kit.commands
from pxr import Sdf
# Import the Pegasus API for simulating drones
from pegasus.simulator.params import ROBOTS
from pegasus.simulator.logic.vehicles.multirotor import Multirotor, MultirotorConfig
from pegasus.simulator.logic.dynamics.linear_drag import LinearDrag
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
# Import the custom python control backend
from utils.nonlinear_controller import NonlinearController
# Auxiliary scipy and numpy modules
import numpy as np
from scipy.spatial.transform import Rotation
# Use os and pathlib for parsing the desired trajectory from a CSV file
import os
from pathlib import Path
from omni.isaac.debug_draw import _debug_draw
class PegasusApp:
    """
    A Template class that serves as an example on how to build a simple Isaac Sim standalone App.

    Spawns two custom-controlled multirotors flying mirrored trajectories and draws
    the desired paths, reproducing the results shown in the Pegasus paper.
    """
    def __init__(self):
        """
        Method that initializes the PegasusApp and is used to setup the simulation environment.
        """
        # Acquire the timeline that will be used to start/stop the simulation
        self.timeline = omni.timeline.get_timeline_interface()
        # Start the Pegasus Interface
        self.pg = PegasusInterface()
        # Acquire the World, .i.e, the singleton that controls that is a one stop shop for setting up physics,
        # spawning asset primitives, etc.
        # Physics runs at 500 Hz while rendering runs at 60 Hz.
        self.pg._world_settings = {"physics_dt": 1.0 / 500.0, "stage_units_in_meters": 1.0, "rendering_dt": 1.0 / 60.0}
        self.pg._world = World(**self.pg._world_settings)
        self.world = self.pg.world
        # Light the stage with an HDR dome (requires the asset to have been cached
        # locally by the Omniverse app at least once).
        prim_utils.create_prim(
            "/World/Light/DomeLight",
            "DomeLight",
            position=np.array([1.0, 1.0, 1.0]),
            attributes={
                "inputs:intensity": 5e3,
                "inputs:color": (1.0, 1.0, 1.0),
                "inputs:texture:file": "omniverse://localhost/NVIDIA/Assets/Skies/Indoor/ZetoCGcom_ExhibitionHall_Interior1.hdr"
            }
        )
        # Get the current directory used to read trajectories and save results
        self.curr_dir = str(Path(os.path.dirname(os.path.realpath(__file__))).resolve())
        # Create the vehicle 1
        # Try to spawn the selected robot in the world to the specified namespace
        config_multirotor1 = MultirotorConfig()
        # Zero drag so the tracking results are not affected by the drag model.
        config_multirotor1.drag = LinearDrag([0.0, 0.0, 0.0])
        # Use the nonlinear controller with the built-in exponential trajectory
        config_multirotor1.backends = [NonlinearController(
            trajectory_file=None,
            results_file=self.curr_dir + "/results/statistics_1.npz")]
        Multirotor(
            "/World/quadrotor1",
            ROBOTS['Iris'],
            1,
            [-5.0,0.00,1.00],
            Rotation.from_euler("XYZ", [0.0, 0.0, 0.0], degrees=True).as_quat(),
            config=config_multirotor1,
        )
        # Create the vehicle 2
        #Try to spawn the selected robot in the world to the specified namespace
        config_multirotor2 = MultirotorConfig()
        # Use the nonlinear controller with the built-in exponential trajectory
        # (reverse=True mirrors the path so the two drones fly toward each other).
        config_multirotor2.backends = [NonlinearController(
            trajectory_file=None,
            results_file=self.curr_dir + "/results/statistics_2.npz",
            reverse=True)]
        Multirotor(
            "/World/quadrotor2",
            ROBOTS['Iris'],
            2,
            [-5.0,4.5,1.0],
            Rotation.from_euler("XYZ", [0.0, 0.0, 0.0], degrees=True).as_quat(),
            config=config_multirotor2,
        )
        # Set the camera to a nice position so that we can see the 2 drones almost touching each other
        self.pg.set_viewport_camera([1.0, 5.15, 1.65], [0.0, -1.65, 3.3])
        # Draw the lines of the desired trajectory in Isaac Sim with the same color as the output plots for the paper
        # Sample the parametric trajectory pd(gamma, speed) on gamma in [-5, 5).
        # NOTE(review): assumes NonlinearController.pd returns an indexable XYZ point — confirm.
        gamma = np.arange(start=-5.0, stop=5.0, step=0.01)
        num_samples = gamma.size
        trajectory1 = [config_multirotor1.backends[0].pd(gamma[i], 0.6) for i in range(num_samples)]
        trajectory2 = [config_multirotor2.backends[0].pd(gamma[i], 0.6, reverse=True) for i in range(num_samples)]
        draw = _debug_draw.acquire_debug_draw_interface()
        point_list_1 = [(trajectory1[i][0], trajectory1[i][1], trajectory1[i][2]) for i in range(num_samples)]
        draw.draw_lines_spline(point_list_1, (31/255, 119/255, 180/255, 1), 5, False)
        point_list_2 = [(trajectory2[i][0], trajectory2[i][1], trajectory2[i][2]) for i in range(num_samples)]
        draw.draw_lines_spline(point_list_2, (255/255, 0, 0, 1), 5, False)
        # Reset the world
        self.world.reset()
    def run(self):
        """
        Method that implements the application main loop, where the physics steps are executed.
        """
        # Start the simulation
        self.timeline.play()
        # The "infinite" loop
        while simulation_app.is_running():
            # Update the UI of the app and perform the physics step
            self.world.step(render=True)
        # Cleanup and stop
        carb.log_warn("PegasusApp Simulation App is closing.")
        self.timeline.stop()
        simulation_app.close()
def main():
    """Entry point: build the two-drone Pegasus demo app and hand control to its main loop."""
    PegasusApp().run()
# Run the example only when this file is executed as a script (not when imported)
if __name__ == "__main__":
    main()
| 6,750 | Python | 37.79885 | 128 | 0.652741 |
PegasusSimulator/PegasusSimulator/examples/5_python_multi_vehicle.py | #!/usr/bin/env python
"""
| File: python_control_backend.py
| Author: Marcelo Jacinto and Joao Pinto ([email protected], [email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: This files serves as an example on how to use the control backends API to create a custom controller
for the vehicle from scratch and use it to perform a simulation, without using PX4 nor ROS.
"""
# Imports to start Isaac Sim from this script
import carb
from omni.isaac.kit import SimulationApp
# Start Isaac Sim's simulation environment
# Note: this simulation app must be instantiated right after the SimulationApp import, otherwise the simulator will crash
# as this is the object that will load all the extensions and load the actual simulator.
simulation_app = SimulationApp({"headless": False})
# -----------------------------------
# The actual script should start here
# -----------------------------------
import omni.timeline
from omni.isaac.core.world import World
# Used for adding extra lights to the environment
import omni.isaac.core.utils.prims as prim_utils
# Import the Pegasus API for simulating drones
from pegasus.simulator.params import ROBOTS
from pegasus.simulator.logic.vehicles.multirotor import Multirotor, MultirotorConfig
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
# Import the custom python control backend
from utils.nonlinear_controller import NonlinearController
# Auxiliary scipy and numpy modules
import numpy as np
from scipy.spatial.transform import Rotation
# Use os and pathlib for parsing the desired trajectory from a CSV file
import os
from pathlib import Path
import random
from omni.isaac.debug_draw import _debug_draw
class PegasusApp:
    """
    Standalone Isaac Sim application that spawns two Iris quadrotors, each driven by a custom
    NonlinearController backend, and makes them track aggressive "pitch relay" trajectories
    read from CSV files — no PX4 and no ROS involved.
    """
    def __init__(self):
        """
        Method that initializes the PegasusApp and is used to setup the simulation environment.
        Builds the world, spawns both vehicles with their controllers, places the viewport
        camera and draws the two reference trajectories as splines for visual comparison.
        """
        # Acquire the timeline that will be used to start/stop the simulation
        self.timeline = omni.timeline.get_timeline_interface()
        # Start the Pegasus Interface
        self.pg = PegasusInterface()
        # Acquire the World, .i.e, the singleton that controls that is a one stop shop for setting up physics,
        # spawning asset primitives, etc.
        self.pg._world = World(**self.pg._world_settings)
        self.world = self.pg.world
        # Add a custom light with a high-definition HDR surround environment of an exhibition hall,
        # instead of the typical ground plane
        prim_utils.create_prim(
            "/World/Light/DomeLight",
            "DomeLight",
            position=np.array([1.0, 1.0, 1.0]),
            attributes={
                "inputs:intensity": 5e3,
                "inputs:color": (1.0, 1.0, 1.0),
                "inputs:texture:file": "omniverse://localhost/NVIDIA/Assets/Skies/Indoor/ZetoCGcom_ExhibitionHall_Interior1.hdr"
            }
        )
        # Get the current directory used to read trajectories and save results
        self.curr_dir = str(Path(os.path.dirname(os.path.realpath(__file__))).resolve())
        # Create the vehicle 1
        # Try to spawn the selected robot in the world to the specified namespace
        config_multirotor1 = MultirotorConfig()
        # Gains Ki/Kr are overridden here; statistics get dumped to results/statistics_1.npz on stop
        config_multirotor1.backends = [NonlinearController(
            trajectory_file=self.curr_dir + "/trajectories/pitch_relay_90_deg_1.csv",
            results_file=self.curr_dir + "/results/statistics_1.npz",
            Ki=[0.5, 0.5, 0.5],
            Kr=[2.0, 2.0, 2.0])]
        Multirotor(
            "/World/quadrotor1",
            ROBOTS['Iris'],
            1,
            [0,-1.5, 8.0],
            Rotation.from_euler("XYZ", [0.0, 0.0, 0.0], degrees=True).as_quat(),
            config=config_multirotor1,
        )
        # Create the vehicle 2
        # Try to spawn the selected robot in the world to the specified namespace
        config_multirotor2 = MultirotorConfig()
        config_multirotor2.backends = [NonlinearController(
            trajectory_file=self.curr_dir + "/trajectories/pitch_relay_90_deg_2.csv",
            results_file=self.curr_dir + "/results/statistics_2.npz",
            Ki=[0.5, 0.5, 0.5],
            Kr=[2.0, 2.0, 2.0])]
        Multirotor(
            "/World/quadrotor2",
            ROBOTS['Iris'],
            2,
            [2.3,-1.5, 8.0],
            Rotation.from_euler("XYZ", [0.0, 0.0, 0.0], degrees=True).as_quat(),
            config=config_multirotor2,
        )
        # Set the camera to a nice position so that we can see the 2 drones almost touching each other
        self.pg.set_viewport_camera([7.53, -1.6, 4.96], [0.0, 3.3, 7.0])
        # Read the trajectories and plot them inside isaac sim
        # NOTE(review): rows are flipped so samples run in increasing time; columns 1-3 are assumed
        # to be x, y, z positions — confirm against the CSV layout consumed by NonlinearController
        trajectory1 = np.flip(np.genfromtxt(self.curr_dir + "/trajectories/pitch_relay_90_deg_1.csv", delimiter=','), axis=0)
        num_samples1,_ = trajectory1.shape
        trajectory2 = np.flip(np.genfromtxt(self.curr_dir + "/trajectories/pitch_relay_90_deg_2.csv", delimiter=','), axis=0)
        num_samples2,_ = trajectory2.shape
        # Draw the lines of the desired trajectory in Isaac Sim with the same color as the output plots for the paper
        draw = _debug_draw.acquire_debug_draw_interface()
        point_list_1 = [(trajectory1[i,1], trajectory1[i,2], trajectory1[i,3]) for i in range(num_samples1)]
        draw.draw_lines_spline(point_list_1, (31/255, 119/255, 180/255, 1), 5, False)
        point_list_2 = [(trajectory2[i,1], trajectory2[i,2], trajectory2[i,3]) for i in range(num_samples2)]
        draw.draw_lines_spline(point_list_2, (255/255, 0, 0, 1), 5, False)
        # Reset the world so that all articulations (aka robots) are initialized before play
        self.world.reset()
    def run(self):
        """
        Method that implements the application main loop, where the physics steps are executed.
        """
        # Start the simulation
        self.timeline.play()
        # The "infinite" loop
        while simulation_app.is_running():
            # Update the UI of the app and perform the physics step
            self.world.step(render=True)
        # Cleanup and stop
        carb.log_warn("PegasusApp Simulation App is closing.")
        self.timeline.stop()
        simulation_app.close()
def main():
    """Instantiate the two-vehicle Pegasus demo app and run its blocking main loop."""
    app = PegasusApp()
    app.run()
if __name__ == "__main__":
main() | 6,518 | Python | 37.803571 | 128 | 0.642989 |
PegasusSimulator/PegasusSimulator/examples/0_template_app.py | #!/usr/bin/env python
"""
| File: 0_template_app.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: This files serves as a template on how to build a clean and simple Isaac Sim based standalone App.
"""
# Imports to start Isaac Sim from this script
import carb
from omni.isaac.kit import SimulationApp
# Start Isaac Sim's simulation environment
# Note: this simulation app must be instantiated right after the SimulationApp import, otherwise the simulator will crash
# as this is the object that will load all the extensions and load the actual simulator.
simulation_app = SimulationApp({"headless": False})
# -----------------------------------
# The actual script should start here
# -----------------------------------
import omni.timeline
from omni.isaac.core import World
class Template:
    """
    A Template class that serves as an example on how to build a simple Isaac Sim standalone App.
    Demonstrates the three callback flavours (physics, render, timeline) plus the manual step
    loop in run().
    """
    def __init__(self):
        """
        Method that initializes the template App and is used to setup the simulation environment.
        """
        # Acquire the timeline that will be used to start/stop the simulation
        self.timeline = omni.timeline.get_timeline_interface()
        # Acquire the World, .i.e, the singleton that controls that is a one stop shop for setting up physics,
        # spawning asset primitives, etc.
        self.world = World()
        # Create a ground plane for the simulation
        self.world.scene.add_default_ground_plane()
        # Create an example physics callback
        self.world.add_physics_callback('template_physics_callback', self.physics_callback)
        # Create an example render callback
        self.world.add_render_callback('template_render_callback', self.render_callback)
        # Create an example timeline callback
        self.world.add_timeline_callback('template_timeline_callback', self.timeline_callback)
        # Reset the simulation environment so that all articulations (aka robots) are initialized
        self.world.reset()
        # Auxiliary flag set by the timeline callback; run() polls it to leave the main loop
        self.stop_sim = False
    def physics_callback(self, dt: float):
        """An example physics callback. It will get invoked every physics step.
        Args:
            dt (float): The time difference between the previous and current function call, in seconds.
        """
        carb.log_info("This is a physics callback. It is called every " + str(dt) + " seconds!")
    def render_callback(self, data):
        """An example render callback. It will get invoked for every rendered frame.
        Args:
            data: Rendering data.
        """
        carb.log_info("This is a render callback. It is called every frame!")
    def timeline_callback(self, timeline_event):
        """An example timeline callback. It will get invoked every time a timeline event occurs. In this example,
        we will check if the event is for a 'simulation stop'. If so, we will attempt to close the app
        Note: the body inspects the world state rather than the event payload, so any timeline
        event that arrives while the world is stopped triggers shutdown.
        Args:
            timeline_event: A timeline event
        """
        if self.world.is_stopped():
            self.stop_sim = True
    def run(self):
        """
        Method that implements the application main loop, where the physics steps are executed.
        """
        # Start the simulation
        self.timeline.play()
        # The "infinite" loop
        while simulation_app.is_running() and not self.stop_sim:
            # Update the UI of the app and perform the physics step
            self.world.step(render=True)
        # Cleanup and stop
        carb.log_warn("Template Simulation App is closing.")
        self.timeline.stop()
        simulation_app.close()
def main():
    """Entry point: construct the template app and run it until the timeline stops."""
    app = Template()
    app.run()
# Standard script guard: only launch the app when this file is executed directly
if __name__ == "__main__":
    main()
| 4,001 | Python | 34.105263 | 121 | 0.652587 |
PegasusSimulator/PegasusSimulator/examples/8_camera_vehicle.py | #!/usr/bin/env python
"""
| File: 8_camera_vehicle.py
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto and Filip Stec. All rights reserved.
| Description: This files serves as an example on how to build an app that makes use of the Pegasus API to run a simulation
with a single vehicle equipped with a camera, producing rgb and camera info ROS2 topics.
"""
# Imports to start Isaac Sim from this script
import carb
from omni.isaac.kit import SimulationApp
# Start Isaac Sim's simulation environment
# Note: this simulation app must be instantiated right after the SimulationApp import, otherwise the simulator will crash
# as this is the object that will load all the extensions and load the actual simulator.
simulation_app = SimulationApp({"headless": False})
# -----------------------------------
# The actual script should start here
# -----------------------------------
import omni.timeline
from omni.isaac.core.world import World
from omni.isaac.core.utils.extensions import disable_extension, enable_extension
# Enable/disable ROS bridge extensions to keep only ROS2 Bridge
disable_extension("omni.isaac.ros_bridge")
enable_extension("omni.isaac.ros2_bridge")
# Import the Pegasus API for simulating drones
from pegasus.simulator.params import ROBOTS, SIMULATION_ENVIRONMENTS
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.backends.mavlink_backend import MavlinkBackend, MavlinkBackendConfig
from pegasus.simulator.logic.vehicles.multirotor import Multirotor, MultirotorConfig
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
from pegasus.simulator.logic.graphs import ROS2Camera
# Auxiliary scipy and numpy modules
from scipy.spatial.transform import Rotation
class PegasusApp:
    """
    Standalone Isaac Sim app that spawns one Iris quadrotor controlled through PX4/MAVLink and
    publishes its onboard camera through a ROS2Camera graph (rgb and camera_info topics).
    """
    def __init__(self):
        """
        Method that initializes the PegasusApp and is used to setup the simulation environment.
        """
        # Acquire the timeline that will be used to start/stop the simulation
        self.timeline = omni.timeline.get_timeline_interface()
        # Start the Pegasus Interface
        self.pg = PegasusInterface()
        # Acquire the World, .i.e, the singleton that controls that is a one stop shop for setting up physics,
        # spawning asset primitives, etc.
        self.pg._world = World(**self.pg._world_settings)
        self.world = self.pg.world
        # Launch one of the worlds provided by NVIDIA
        self.pg.load_environment(SIMULATION_ENVIRONMENTS["Curved Gridroom"])
        # Create the vehicle
        # Try to spawn the selected robot in the world to the specified namespace
        config_multirotor = MultirotorConfig()
        # Create the multirotor configuration
        # NOTE(review): px4_dir is a user-specific absolute path — adjust it before running on
        # another machine
        mavlink_config = MavlinkBackendConfig({
            "vehicle_id": 0,
            "px4_autolaunch": True,
            "px4_dir": "/home/marcelo/PX4-Autopilot",
            "px4_vehicle_model": 'iris'
        })
        config_multirotor.backends = [MavlinkBackend(mavlink_config)]
        # Create camera graph for the existing Camera prim on the Iris model, which can be found
        # at the prim path `/World/quadrotor/body/Camera`. The camera prim path is the local path from the vehicle's prim path
        # to the camera prim, to which this graph will be connected. All ROS2 topics published by this graph will have
        # namespace `quadrotor` and frame_id `Camera` followed by the selected camera types (`rgb`, `camera_info`).
        config_multirotor.graphs = [ROS2Camera("body/Camera", config={"types": ['rgb', 'camera_info']})]
        Multirotor(
            "/World/quadrotor",
            ROBOTS['Iris'],
            0,
            [0.0, 0.0, 0.07],
            Rotation.from_euler("XYZ", [0.0, 0.0, 0.0], degrees=True).as_quat(),
            config=config_multirotor,
        )
        # Reset the simulation environment so that all articulations (aka robots) are initialized
        self.world.reset()
        # Auxiliary flag polled by run(); nothing in this example sets it to True
        self.stop_sim = False
    def run(self):
        """
        Method that implements the application main loop, where the physics steps are executed.
        """
        # Start the simulation
        self.timeline.play()
        # The "infinite" loop
        while simulation_app.is_running() and not self.stop_sim:
            # Update the UI of the app and perform the physics step
            self.world.step(render=True)
        # Cleanup and stop
        carb.log_warn("PegasusApp Simulation App is closing.")
        self.timeline.stop()
        simulation_app.close()
def main():
    """Build the camera-equipped drone demo and enter its simulation loop."""
    PegasusApp().run()
# Run the example only when executed as a script (not when imported as a module)
if __name__ == "__main__":
    main()
| 4,878 | Python | 38.032 | 126 | 0.677532 |
PegasusSimulator/PegasusSimulator/examples/utils/nonlinear_controller.py | #!/usr/bin/env python
"""
| File: nonlinear_controller.py
| Author: Marcelo Jacinto and Joao Pinto ([email protected], [email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: This files serves as an example on how to use the control backends API to create a custom controller
for the vehicle from scratch and use it to perform a simulation, without using PX4 nor ROS. In this controller, we
provide a quick way of following a given trajectory specified in csv files or track an hard-coded trajectory based
on exponentials! NOTE: This is just an example, to demonstrate the potential of the API. A much more flexible solution
can be achieved
"""
# Imports to be able to log to the terminal with fancy colors
import carb
# Imports from the Pegasus library
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.backends import Backend
# Auxiliary scipy and numpy modules
import numpy as np
from scipy.spatial.transform import Rotation
class NonlinearController(Backend):
"""A nonlinear controller class. It implements a nonlinear controller that allows a vehicle to track
aggressive trajectories. This controlers is well described in the papers
[1] J. Pinto, B. J. Guerreiro and R. Cunha, "Planning Parcel Relay Manoeuvres for Quadrotors,"
2021 International Conference on Unmanned Aircraft Systems (ICUAS), Athens, Greece, 2021,
pp. 137-145, doi: 10.1109/ICUAS51884.2021.9476757.
[2] D. Mellinger and V. Kumar, "Minimum snap trajectory generation and control for quadrotors,"
2011 IEEE International Conference on Robotics and Automation, Shanghai, China, 2011,
pp. 2520-2525, doi: 10.1109/ICRA.2011.5980409.
"""
def __init__(self,
trajectory_file: str = None,
results_file: str=None,
reverse=False,
Kp=[10.0, 10.0, 10.0],
Kd=[8.5, 8.5, 8.5],
Ki=[1.50, 1.50, 1.50],
Kr=[3.5, 3.5, 3.5],
Kw=[0.5, 0.5, 0.5]):
# The current rotor references [rad/s]
self.input_ref = [0.0, 0.0, 0.0, 0.0]
# The current state of the vehicle expressed in the inertial frame (in ENU)
self.p = np.zeros((3,)) # The vehicle position
self.R: Rotation = Rotation.identity() # The vehicle attitude
self.w = np.zeros((3,)) # The angular velocity of the vehicle
self.v = np.zeros((3,)) # The linear velocity of the vehicle in the inertial frame
self.a = np.zeros((3,)) # The linear acceleration of the vehicle in the inertial frame
# Define the control gains matrix for the outer-loop
self.Kp = np.diag(Kp)
self.Kd = np.diag(Kd)
self.Ki = np.diag(Ki)
self.Kr = np.diag(Kr)
self.Kw = np.diag(Kw)
self.int = np.array([0.0, 0.0, 0.0])
# Define the dynamic parameters for the vehicle
self.m = 1.50 # Mass in Kg
self.g = 9.81 # The gravity acceleration ms^-2
# Read the target trajectory from a CSV file inside the trajectories directory
# if a trajectory is provided. Otherwise, just perform the hard-coded trajectory provided with this controller
if trajectory_file is not None:
self.trajectory = self.read_trajectory_from_csv(trajectory_file)
self.index = 0
self.max_index, _ = self.trajectory.shape
self.total_time = 0.0
# Use the built-in trajectory hard-coded for this controller
else:
# Set the initial time for starting when using the built-in trajectory (the time is also used in this case
# as the parametric value)
self.total_time = -5.0
# Signal that we will not used a received trajectory
self.trajectory = None
self.max_index = 1
self.reverse = reverse
# Auxiliar variable, so that we only start sending motor commands once we get the state of the vehicle
self.reveived_first_state = False
# Lists used for analysing performance statistics
self.results_files = results_file
self.time_vector = []
self.desired_position_over_time = []
self.position_over_time = []
self.position_error_over_time = []
self.velocity_error_over_time = []
self.atittude_error_over_time = []
self.attitude_rate_error_over_time = []
    def read_trajectory_from_csv(self, file_name: str):
        """Auxiliary method used to read the desired trajectory from a CSV file
        Args:
            file_name (str): A string with the name of the trajectory inside the trajectories directory
        Returns:
            np.ndarray: A numpy matrix with the trajectory desired states over time
        """
        # Load the CSV into a numpy matrix and flip the row order
        # (the CSV rows apparently run in reverse time order — confirm against the trajectory files)
        return np.flip(np.genfromtxt(file_name, delimiter=','), axis=0)
def start(self):
"""
Reset the control and trajectory index
"""
self.reset_statistics()
def stop(self):
"""
Stopping the controller. Saving the statistics data for plotting later
"""
# Check if we should save the statistics to some file or not
if self.results_files is None:
return
statistics = {}
statistics["time"] = np.array(self.time_vector)
statistics["p"] = np.vstack(self.position_over_time)
statistics["desired_p"] = np.vstack(self.desired_position_over_time)
statistics["ep"] = np.vstack(self.position_error_over_time)
statistics["ev"] = np.vstack(self.velocity_error_over_time)
statistics["er"] = np.vstack(self.atittude_error_over_time)
statistics["ew"] = np.vstack(self.attitude_rate_error_over_time)
np.savez(self.results_files, **statistics)
carb.log_warn("Statistics saved to: " + self.results_files)
self.reset_statistics()
def update_sensor(self, sensor_type: str, data):
"""
Do nothing. For now ignore all the sensor data and just use the state directly for demonstration purposes.
This is a callback that is called at every physics step.
Args:
sensor_type (str): The name of the sensor providing the data
data (dict): A dictionary that contains the data produced by the sensor
"""
pass
def update_state(self, state: State):
"""
Method that updates the current state of the vehicle. This is a callback that is called at every physics step
Args:
state (State): The current state of the vehicle.
"""
self.p = state.position
self.R = Rotation.from_quat(state.attitude)
self.w = state.angular_velocity
self.v = state.linear_velocity
self.reveived_first_state = True
def input_reference(self):
"""
Method that is used to return the latest target angular velocities to be applied to the vehicle
Returns:
A list with the target angular velocities for each individual rotor of the vehicle
"""
return self.input_ref
    def update(self, dt: float):
        """Method that implements the nonlinear control law and updates the target angular velocities for each rotor.
        This method will be called by the simulation on every physics step
        Args:
            dt (float): The time elapsed between the previous and current function calls (s).
        """
        # Do not produce commands until the first state update has arrived
        if self.reveived_first_state == False:
            return
        # -------------------------------------------------
        # Update the references for the controller to track
        # -------------------------------------------------
        self.total_time += dt
        # Check if we need to update to the next trajectory index
        # (short-circuit keeps this safe when self.trajectory is None, since max_index == 1)
        if self.index < self.max_index - 1 and self.total_time >= self.trajectory[self.index + 1, 0]:
            self.index += 1
        # Update using an external trajectory
        if self.trajectory is not None:
            # the target positions [m], velocity [m/s], accelerations [m/s^2], jerk [m/s^3], yaw-angle [rad], yaw-rate [rad/s]
            p_ref = np.array([self.trajectory[self.index, 1], self.trajectory[self.index, 2], self.trajectory[self.index, 3]])
            v_ref = np.array([self.trajectory[self.index, 4], self.trajectory[self.index, 5], self.trajectory[self.index, 6]])
            a_ref = np.array([self.trajectory[self.index, 7], self.trajectory[self.index, 8], self.trajectory[self.index, 9]])
            j_ref = np.array([self.trajectory[self.index, 10], self.trajectory[self.index, 11], self.trajectory[self.index, 12]])
            yaw_ref = self.trajectory[self.index, 13]
            yaw_rate_ref = self.trajectory[self.index, 14]
        # Or update the reference using the built-in trajectory
        else:
            s = 0.6
            p_ref = self.pd(self.total_time, s, self.reverse)
            v_ref = self.d_pd(self.total_time, s, self.reverse)
            a_ref = self.dd_pd(self.total_time, s, self.reverse)
            j_ref = self.ddd_pd(self.total_time, s, self.reverse)
            yaw_ref = self.yaw_d(self.total_time, s)
            yaw_rate_ref = self.d_yaw_d(self.total_time, s)
        # -------------------------------------------------
        # Start the controller implementation
        # -------------------------------------------------
        # Compute the tracking errors
        ep = self.p - p_ref
        ev = self.v - v_ref
        # Accumulate the integral of the position error (no anti-windup is implemented)
        self.int = self.int + (ep * dt)
        ei = self.int
        # Compute F_des term (PID on position plus gravity and feed-forward acceleration)
        F_des = -(self.Kp @ ep) - (self.Kd @ ev) - (self.Ki @ ei) + np.array([0.0, 0.0, self.m * self.g]) + (self.m * a_ref)
        # Get the current axis Z_B (given by the last column of the rotation matrix)
        Z_B = self.R.as_matrix()[:,2]
        # Get the desired total thrust in Z_B direction (u_1)
        u_1 = F_des @ Z_B
        # Compute the desired body-frame axis Z_b
        Z_b_des = F_des / np.linalg.norm(F_des)
        # Compute X_C_des
        X_c_des = np.array([np.cos(yaw_ref), np.sin(yaw_ref), 0.0])
        # Compute Y_b_des
        Z_b_cross_X_c = np.cross(Z_b_des, X_c_des)
        Y_b_des = Z_b_cross_X_c / np.linalg.norm(Z_b_cross_X_c)
        # Compute X_b_des
        X_b_des = np.cross(Y_b_des, Z_b_des)
        # Compute the desired rotation R_des = [X_b_des | Y_b_des | Z_b_des]
        R_des = np.c_[X_b_des, Y_b_des, Z_b_des]
        R = self.R.as_matrix()
        # Compute the rotation error (vee map of the skew-symmetric part)
        e_R = 0.5 * self.vee((R_des.T @ R) - (R.T @ R_des))
        # Compute an approximation of the current vehicle acceleration in the inertial frame (since we cannot measure it directly)
        self.a = (u_1 * Z_B) / self.m - np.array([0.0, 0.0, self.g])
        # Compute the desired angular velocity by projecting the angular velocity in the Xb-Yb plane
        # projection of angular velocity on xB − yB plane
        # see eqn (7) from [2].
        hw = (self.m / u_1) * (j_ref - np.dot(Z_b_des, j_ref) * Z_b_des)
        # desired angular velocity
        w_des = np.array([-np.dot(hw, Y_b_des),
                           np.dot(hw, X_b_des),
                           yaw_rate_ref * Z_b_des[2]])
        # Compute the angular velocity error
        e_w = self.w - w_des
        # Compute the torques to apply on the rigid body
        tau = -(self.Kr @ e_R) - (self.Kw @ e_w)
        # Use the allocation matrix provided by the Multirotor vehicle to convert the desired force and torque
        # to angular velocity [rad/s] references to give to each rotor
        # (self.vehicle is presumably attached by the Backend machinery — TODO confirm)
        if self.vehicle:
            self.input_ref = self.vehicle.force_and_torques_to_velocities(u_1, tau)
        # ----------------------------
        # Statistics to save for later
        # ----------------------------
        self.time_vector.append(self.total_time)
        self.position_over_time.append(self.p)
        self.desired_position_over_time.append(p_ref)
        self.position_error_over_time.append(ep)
        self.velocity_error_over_time.append(ev)
        self.atittude_error_over_time.append(e_R)
        self.attitude_rate_error_over_time.append(e_w)
@staticmethod
def vee(S):
"""Auxiliary function that computes the 'v' map which takes elements from so(3) to R^3.
Args:
S (np.array): A matrix in so(3)
"""
return np.array([-S[1,2], S[0,2], -S[0,1]])
def reset_statistics(self):
self.index = 0
# If we received an external trajectory, reset the time to 0.0
if self.trajectory is not None:
self.total_time = 0.0
# if using the internal trajectory, make the parametric value start at -5.0
else:
self.total_time = -5.0
# Reset the lists used for analysing performance statistics
self.time_vector = []
self.desired_position_over_time = []
self.position_over_time = []
self.position_error_over_time = []
self.velocity_error_over_time = []
self.atittude_error_over_time = []
self.attitude_rate_error_over_time = []
# ---------------------------------------------------
# Definition of an exponential trajectory for example
# This can be used as a reference if not trajectory file is passed
# as an argument to the constructor of this class
# ---------------------------------------------------
def pd(self, t, s, reverse=False):
"""The desired position of the built-in trajectory
Args:
t (float): The parametric value that guides the equation
s (float): How steep and agressive the curve is
reverse (bool, optional): Choose whether we want to flip the curve (so that we can have 2 drones almost touching). Defaults to False.
Returns:
np.ndarray: A 3x1 array with the x, y ,z desired [m]
"""
x = t
z = 1 / s * np.exp(-0.5 * np.power(t/s, 2)) + 1.0
y = 1 / s * np.exp(-0.5 * np.power(t/s, 2))
if reverse == True:
y = -1 / s * np.exp(-0.5 * np.power(t/s, 2)) + 4.5
return np.array([x,y,z])
def d_pd(self, t, s, reverse=False):
"""The desired velocity of the built-in trajectory
Args:
t (float): The parametric value that guides the equation
s (float): How steep and agressive the curve is
reverse (bool, optional): Choose whether we want to flip the curve (so that we can have 2 drones almost touching). Defaults to False.
Returns:
np.ndarray: A 3x1 array with the d_x, d_y ,d_z desired [m/s]
"""
x = 1.0
y = -(t * np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,3)
z = -(t * np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,3)
if reverse == True:
y = (t * np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,3)
return np.array([x,y,z])
def dd_pd(self, t, s, reverse=False):
"""The desired acceleration of the built-in trajectory
Args:
t (float): The parametric value that guides the equation
s (float): How steep and agressive the curve is
reverse (bool, optional): Choose whether we want to flip the curve (so that we can have 2 drones almost touching). Defaults to False.
Returns:
np.ndarray: A 3x1 array with the dd_x, dd_y ,dd_z desired [m/s^2]
"""
x = 0.0
y = (np.power(t,2)*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,5) - np.exp(-np.power(t,2)/(2*np.power(s,2)))/np.power(s,3)
z = (np.power(t,2)*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,5) - np.exp(-np.power(t,2)/(2*np.power(s,2)))/np.power(s,3)
if reverse == True:
y = np.exp(-np.power(t,2)/(2*np.power(s,2)))/np.power(s,3) - (np.power(t,2)*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,5)
return np.array([x,y,z])
def ddd_pd(self, t, s, reverse=False):
"""The desired jerk of the built-in trajectory
Args:
t (float): The parametric value that guides the equation
s (float): How steep and agressive the curve is
reverse (bool, optional): Choose whether we want to flip the curve (so that we can have 2 drones almost touching). Defaults to False.
Returns:
np.ndarray: A 3x1 array with the ddd_x, ddd_y ,ddd_z desired [m/s^3]
"""
x = 0.0
y = (3*t*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,5) - (np.power(t,3)*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,7)
z = (3*t*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,5) - (np.power(t,3)*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,7)
if reverse == True:
y = (np.power(t,3)*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,7) - (3*t*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,5)
return np.array([x,y,z])
def yaw_d(self, t, s):
"""The desired yaw of the built-in trajectory
Args:
t (float): The parametric value that guides the equation
s (float): How steep and agressive the curve is
reverse (bool, optional): Choose whether we want to flip the curve (so that we can have 2 drones almost touching). Defaults to False.
Returns:
np.ndarray: A float with the desired yaw in rad
"""
return 0.0
def d_yaw_d(self, t, s):
"""The desired yaw_rate of the built-in trajectory
Args:
t (float): The parametric value that guides the equation
s (float): How steep and agressive the curve is
reverse (bool, optional): Choose whether we want to flip the curve (so that we can have 2 drones almost touching). Defaults to False.
Returns:
np.ndarray: A float with the desired yaw_rate in rad/s
"""
return 0.0 | 18,171 | Python | 41.064815 | 149 | 0.587144 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/setup.py | """
| File: setup.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: File that defines the installation requirements for this python package.
"""
import os
import toml
from setuptools import setup
# Absolute path of the directory containing this setup.py (the extension root)
EXTENSION_PATH = os.path.dirname(os.path.realpath(__file__))
# Parse config/extension.toml so the packaging metadata below stays in sync with the extension manifest
EXTENSION_TOML_DATA = toml.load(os.path.join(EXTENSION_PATH, "config", "extension.toml"))
# Minimum dependencies required prior to installation
INSTALL_REQUIRES = [
    # generic
    "numpy",
    "pymavlink",  # MAVLink protocol bindings used by the MAVLink backend
    "scipy",
    "pyyaml",
]
# Installation operation
setup(
name="pegasus-simulator",
author="Marcelo Jacinto",
maintainer="Marcelo Jacinto",
maintainer_email="[email protected]",
url=EXTENSION_TOML_DATA["package"]["repository"],
version=EXTENSION_TOML_DATA["package"]["version"],
description=EXTENSION_TOML_DATA["package"]["description"],
keywords=EXTENSION_TOML_DATA["package"]["keywords"],
license="BSD-3-Clause",
include_package_data=True,
python_requires=">=3.7",
install_requires=INSTALL_REQUIRES,
packages=["pegasus.simulator"],
classifiers=["Natural Language :: English", "Programming Language :: Python :: 3.7"],
zip_safe=False,
) | 1,389 | Python | 31.325581 | 89 | 0.712743 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/extension.py | """
| File: extension.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Implements the Pegasus_SimulatorExtension which omni.ext.IExt that is created when this class is enabled. In turn, this class initializes the extension widget.
"""
__all__ = ["Pegasus_SimulatorExtension"]
# Python garbage collenction and asyncronous API
import gc
import asyncio
from functools import partial
from threading import Timer
# Omniverse general API
import pxr
import carb
import omni.ext
import omni.usd
import omni.kit.ui
import omni.kit.app
import omni.ui as ui
from omni.kit.viewport.utility import get_active_viewport
# Pegasus Extension Files and API
from pegasus.simulator.params import MENU_PATH, WINDOW_TITLE
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
# Setting up the UI for the extension's Widget
from pegasus.simulator.ui.ui_window import WidgetWindow
from pegasus.simulator.ui.ui_delegate import UIDelegate
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class Pegasus_SimulatorExtension(omni.ext.IExt):
    """Entry point of the Pegasus Simulator extension.

    Instantiated by Omniverse Kit when the extension is enabled: ``on_startup``
    creates the backend (PegasusInterface) and the widget window, and
    ``on_shutdown`` tears everything down again.
    """

    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    def on_startup(self, ext_id):
        """Called by Kit when the extension is enabled.

        Args:
            ext_id: Identifier of this extension instance (provided by Kit).
        """
        carb.log_info("Pegasus Simulator is starting up")

        # Save the extension id
        self._ext_id = ext_id

        # Create the UI of the app and its manager
        self.ui_delegate = None
        self.ui_window = None

        # Start the extension backend
        self._pegasus_sim = PegasusInterface()

        # Check if we already have a stage loaded (when using autoload feature, it might not be ready yet)
        # This is a limitation of the simulator, and we are doing this to make sure that the
        # extension does not crash when using the GUI with autoload feature
        # If autoload was not enabled, and we are enabling the extension from the Extension widget, then
        # we will always have a state open, and the auxiliary timer will never run
        if omni.usd.get_context().get_stage_state() != omni.usd.StageState.CLOSED:
            self._pegasus_sim.initialize_world()
        else:
            # We need to create a timer to check until the window is properly open and the stage created. This is a limitation
            # of the current Isaac Sim simulator and the way it loads extensions :(
            self.autoload_helper()

        # Add the ability to show the window if the system requires it (QuickLayout feature)
        ui.Workspace.set_show_window_fn(WINDOW_TITLE, partial(self.show_window, None))

        # Add the extension to the editor menu inside isaac sim
        editor_menu = omni.kit.ui.get_editor_menu()
        if editor_menu:
            self._menu = editor_menu.add_item(MENU_PATH, self.show_window, toggle=True, value=True)

        # Show the window (It call the self.show_window)
        ui.Workspace.show_window(WINDOW_TITLE, show=True)

    def autoload_helper(self):
        """Poll (once per second, via threading.Timer) until the viewport and stage exist, then initialize the world."""
        # Check if we already have a viewport and a camera of interest
        if get_active_viewport() != None and type(get_active_viewport().stage) == pxr.Usd.Stage and str(get_active_viewport().stage.GetPrimAtPath("/OmniverseKit_Persp")) != "invalid null prim":
            self._pegasus_sim.initialize_world()
        else:
            Timer(1.0, self.autoload_helper).start()

    def show_window(self, menu, show):
        """
        Method that controls whether a widget window is created or not

        Args:
            menu: Menu entry that triggered the call (unused; required by the menu callback signature).
            show: When True a fresh window/delegate pair is created, otherwise the window is hidden.
        """
        if show == True:
            # Create a window and its delegate
            self.ui_delegate = UIDelegate()
            self.ui_window = WidgetWindow(self.ui_delegate)
            self.ui_window.set_visibility_changed_fn(self._visibility_changed_fn)
        # If we have a window and we are not supposed to show it, then change its visibility
        elif self.ui_window:
            self.ui_window.visible = False

    def _visibility_changed_fn(self, visible):
        """
        This method is invoked when the user pressed the "X" to close the extension window
        """
        # Update the Isaac sim menu visibility
        self._set_menu(visible)
        if not visible:
            # Destroy the window, because we create a new one in the show window method
            asyncio.ensure_future(self._destroy_window_async())

    def _set_menu(self, visible):
        """
        Method that updates the isaac sim ui menu to create the Widget window on and off
        """
        editor_menu = omni.kit.ui.get_editor_menu()
        if editor_menu:
            editor_menu.set_value(MENU_PATH, visible)

    async def _destroy_window_async(self):
        """Destroy the widget window asynchronously, one frame after being requested."""
        # Wait one frame before it gets destructed (from NVidia example)
        await omni.kit.app.get_app().next_update_async()

        # Destroy the window UI if it exists
        if self.ui_window:
            self.ui_window.destroy()
            self.ui_window = None

    def on_shutdown(self):
        """
        Callback called when the extension is shutdown
        """
        carb.log_info("Pegasus Isaac extension shutdown")

        # Destroy the isaac sim menu object
        self._menu = None

        # Destroy the window
        if self.ui_window:
            self.ui_window.destroy()
            self.ui_window = None

        # Destroy the UI delegate
        if self.ui_delegate:
            self.ui_delegate = None

        # De-register the function that shows the window from the isaac sim ui
        ui.Workspace.set_show_window_fn(WINDOW_TITLE, None)
        editor_menu = omni.kit.ui.get_editor_menu()
        if editor_menu:
            editor_menu.remove_item(MENU_PATH)

        # Call the garbage collector
        gc.collect()
| 6,081 | Python | 37.493671 | 193 | 0.666996 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/__init__.py | """
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
# Package author metadata.
__author__ = "Marcelo Jacinto"
__email__ = "[email protected]"

# Re-export the extension entry point so Kit can instantiate it from this package.
from .extension import Pegasus_SimulatorExtension
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/params.py | """
| File: params.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: File that defines the base configurations for the Pegasus Simulator.
"""
import os
from pathlib import Path
import omni.isaac.core.utils.nucleus as nucleus
# Names used to register the extension and its window/menu entries inside Kit.
EXTENSION_NAME = "Pegasus Simulator"
WINDOW_TITLE = "Pegasus Simulator"
MENU_PATH = "Window/" + WINDOW_TITLE
DOC_LINK = "https://docs.omniverse.nvidia.com"
EXTENSION_OVERVIEW = "This extension shows how to incorporate drones into Isaac Sim"

# Filesystem layout: folder of this file and the resolved repository root.
EXTENSION_FOLDER_PATH = Path(os.path.dirname(os.path.realpath(__file__)))
ROOT = str(EXTENSION_FOLDER_PATH.parent.parent.parent.resolve())

# YAML file holding the user-adjustable settings of the extension.
CONFIG_FILE = ROOT + "/pegasus.simulator/config/configs.yaml"

# Assets shipped together with the extension.
ASSET_PATH = ROOT + "/pegasus.simulator/pegasus/simulator/assets"
ROBOTS_ASSETS = ASSET_PATH + "/Robots"

# Vehicles bundled with the extension (display name -> USD file).
ROBOTS = {"Iris": ROBOTS_ASSETS + "/Iris/iris.usd"}  # , "Flying Cube": ROBOTS_ASSETS + "/iris_cube.usda"

# Simulation environments served from the NVIDIA nucleus assets server.
NVIDIA_ASSETS_PATH = str(nucleus.get_assets_root_path())
ISAAC_SIM_ENVIRONMENTS = "/Isaac/Environments"
NVIDIA_SIMULATION_ENVIRONMENTS = {
    "Default Environment": "Grid/default_environment.usd",
    "Black Gridroom": "Grid/gridroom_black.usd",
    "Curved Gridroom": "Grid/gridroom_curved.usd",
    "Hospital": "Hospital/hospital.usd",
    "Office": "Office/office.usd",
    "Simple Room": "Simple_Room/simple_room.usd",
    "Warehouse": "Simple_Warehouse/warehouse.usd",
    "Warehouse with Forklifts": "Simple_Warehouse/warehouse_with_forklifts.usd",
    "Warehouse with Shelves": "Simple_Warehouse/warehouse_multiple_shelves.usd",
    "Full Warehouse": "Simple_Warehouse/full_warehouse.usd",
    "Flat Plane": "Terrains/flat_plane.usd",
    "Rough Plane": "Terrains/rough_plane.usd",
    "Slope Plane": "Terrains/slope.usd",
    "Stairs Plane": "Terrains/stairs.usd",
}
OMNIVERSE_ENVIRONMENTS = {
    "Exhibition Hall": "omniverse://localhost/NVIDIA/Assets/Scenes/Templates/Interior/ZetCG_ExhibitionHall.usd"
}

# Single lookup table (display name -> absolute path/URL) with every known
# environment. Isaac Sim worlds get prefixed with the nucleus assets path,
# while the Omniverse entries already carry a complete URL.
SIMULATION_ENVIRONMENTS = {
    name: NVIDIA_ASSETS_PATH + ISAAC_SIM_ENVIRONMENTS + "/" + relative_path
    for name, relative_path in NVIDIA_SIMULATION_ENVIRONMENTS.items()
}
SIMULATION_ENVIRONMENTS.update(OMNIVERSE_ENVIRONMENTS)

# Default physics/rendering rates and stage units for newly created worlds.
DEFAULT_WORLD_SETTINGS = {"physics_dt": 1.0 / 250.0, "stage_units_in_meters": 1.0, "rendering_dt": 1.0 / 60.0}

# Preview images displayed by the extension UI.
THUMBNAIL = ROBOTS_ASSETS + "/Iris/iris_thumbnail.png"
WORLD_THUMBNAIL = ASSET_PATH + "/Worlds/Empty_thumbnail.png"
| 3,070 | Python | 38.883116 | 111 | 0.739414 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/parser/dynamics_parser.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Sensors that can be used with the vehicles
from pegasus.simulator.parser import Parser
from pegasus.simulator.logic.dynamics import LinearDrag
class DynamicsParser(Parser):
    """Builds dynamics (drag) models from their configuration dictionaries."""

    def __init__(self):
        # Maps the type-name used in configuration files to the dynamics class to instantiate.
        self.dynamics = {"linear_drag": LinearDrag}

    def parse(self, data_type: str, data_dict):
        """Instantiate the dynamics model registered under ``data_type`` with ``data_dict``."""
        return self.dynamics[data_type](data_dict)
| 635 | Python | 25.499999 | 56 | 0.699213 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/parser/parser.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
class Parser:
    """Base class for every configuration parser of the extension.

    Subclasses override :meth:`parse` to turn a (type-name, dictionary)
    pair read from a configuration file into an instantiated object.
    """

    def __init__(self):
        pass

    def parse(self, data_type: str, data_dict):
        # Interface method: concrete parsers return the object built from data_dict.
        pass
| 218 | Python | 15.846153 | 47 | 0.62844 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/parser/thrusters_parser.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Sensors that can be used with the vehicles
from pegasus.simulator.parser import Parser
from pegasus.simulator.logic.thrusters import QuadraticThrustCurve
class ThrustersParser(Parser):
    """Builds thrust-curve models from their configuration dictionaries."""

    def __init__(self):
        # Maps the type-name used in configuration files to the thrust-curve class to instantiate.
        self.thrust_curves = {"quadratic_thrust_curve": QuadraticThrustCurve}

    def parse(self, data_type: str, data_dict):
        """Instantiate the thrust curve registered under ``data_type`` with ``data_dict``."""
        return self.thrust_curves[data_type](data_dict)
| 692 | Python | 27.874999 | 77 | 0.715318 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/parser/vehicle_parser.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import carb
# Sensors that can be used with the vehicles
from pegasus.simulator.parser import Parser, SensorParser, ThrustersParser, DynamicsParser, BackendsParser
from pegasus.simulator.logic.vehicles import MultirotorConfig
class VehicleParser(Parser):
    """Top-level parser that assembles a complete ``MultirotorConfig``.

    Each section of the configuration dictionary (sensors, thrusters, drag
    dynamics and communication backends) is delegated to its own sub-parser.
    """

    def __init__(self):
        # Initialize the Parser object
        super().__init__()

        # Initialize Parsers for the sensors, dynamics and backends for control and communications
        self.sensor_parser = SensorParser()
        self.thrusters_parser = ThrustersParser()
        self.dynamics_parser = DynamicsParser()
        self.backends_parser = BackendsParser()

    def parse(self, data_type: str, data_dict=None):
        """Build a ``MultirotorConfig`` from a configuration dictionary.

        Args:
            data_type: Name of the vehicle entry being parsed (kept for the
                ``Parser`` interface; not used to select behaviour here).
            data_dict: Configuration dictionary of the vehicle. When omitted,
                an empty configuration is assumed.

        Returns:
            MultirotorConfig: configuration with the USD file, sensors,
            thrust curve, drag model and backends filled in.
        """
        # Fix: the original signature used a mutable default argument (data_dict={}),
        # which is shared across calls; use None as the sentinel instead.
        if data_dict is None:
            data_dict = {}

        # Get the USD model associated with the vehicle
        usd_model = data_dict.get("usd_model", "")
        # NOTE: the "thumbnail" key was previously read into a local that was
        # never used; the dead read was removed.

        # ---------------------------------------
        # Generate the sensors for the multirotor
        # ---------------------------------------
        sensors = []
        sensors_config = data_dict.get("sensors", {})
        for sensor_name in sensors_config:
            sensor = self.sensor_parser.parse(sensor_name, sensors_config[sensor_name])
            if sensor is not None:
                sensors.append(sensor)

        # -----------------------------------------
        # Generate the thrusters for the multirotor
        # -----------------------------------------
        thrusters = None
        thrusters_config = data_dict.get("thrusters", {})

        # Note: if a dictionary/yaml file contains more than one thrust curve configuration,
        # only the last one will be kept
        for thrust_curve_name in thrusters_config:
            curve = self.thrusters_parser.parse(thrust_curve_name, thrusters_config[thrust_curve_name])
            if curve is not None:
                thrusters = curve

        # ----------------------------------------
        # Generate the dynamics for the multirotor
        # ----------------------------------------
        dynamics = None
        dynamics_config = data_dict.get("drag", {})
        for dynamics_name in dynamics_config:
            # Debug log of the drag configuration being parsed (kept from the original code)
            carb.log_warn(dynamics_config[dynamics_name])
            dynamic = self.dynamics_parser.parse(dynamics_name, dynamics_config[dynamics_name])
            if dynamic is not None:
                dynamics = dynamic

        # ----------------------------------------
        # Generate the backends for the multirotor
        # ----------------------------------------
        backends = []
        backends_config = data_dict.get("backends", {})
        for backends_name in backends_config:
            backend = self.backends_parser.parse(backends_name, backends_config[backends_name])
            if backend is not None:
                backends.append(backend)

        # Create a Multirotor config from the parsed data
        multirotor_configuration = MultirotorConfig()
        multirotor_configuration.usd_file = usd_model
        multirotor_configuration.thrust_curve = thrusters
        multirotor_configuration.drag = dynamics
        multirotor_configuration.sensors = sensors
        multirotor_configuration.backends = backends

        return multirotor_configuration
| 3,406 | Python | 37.715909 | 106 | 0.57751 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/parser/__init__.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .parser import Parser
from .sensor_parser import SensorParser
from .thrusters_parser import ThrustersParser
from .dynamics_parser import DynamicsParser
from .backends_parser import BackendsParser
from .graphs_parser import GraphParser
| 343 | Python | 30.272725 | 45 | 0.819242 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/parser/sensor_parser.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Sensors that can be used with the vehicles
from pegasus.simulator.parser import Parser
from pegasus.simulator.logic.sensors import Barometer, GPS, IMU, Magnetometer
class SensorParser(Parser):
    """Builds sensor objects (barometer, GPS, IMU, magnetometer) from configuration dictionaries."""

    def __init__(self):
        # Maps the type-name used in configuration files to the sensor class to instantiate.
        self.sensors = {"barometer": Barometer, "gps": GPS, "imu": IMU, "magnetometer": Magnetometer}

    def parse(self, data_type: str, data_dict):
        """Instantiate the sensor registered under ``data_type`` with ``data_dict``."""
        return self.sensors[data_type](data_dict)
| 700 | Python | 28.208332 | 101 | 0.694286 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/parser/backends_parser.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Sensors that can be used with the vehicles
from pegasus.simulator.parser import Parser
from pegasus.simulator.logic.backends import MavlinkBackendConfig, MavlinkBackend, ROS2Backend
class BackendsParser(Parser):
    """Builds the control/communication backends from configuration dictionaries."""

    # TODO - improve the structure of the backends in order to clean this parser
    def __init__(self):
        # Maps the type-name used in configuration files to the backend class to instantiate.
        self.backends = {"mavlink": MavlinkBackendConfig, "ros2": ROS2Backend}

    def parse(self, data_type: str, data_dict):
        """Instantiate the backend registered under ``data_type``.

        The mavlink entry is special-cased: its dictionary first builds a
        MavlinkBackendConfig, which is then wrapped in a MavlinkBackend.
        """
        backends_cls = self.backends[data_type]

        # Fix: compare classes by identity ('is'), not equality ('==').
        if backends_cls is MavlinkBackendConfig:
            return MavlinkBackend(backends_cls(data_dict))

        # Create an instance of that backend
        return backends_cls(data_dict)
| 892 | Python | 29.793102 | 94 | 0.709641 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/parser/graphs_parser.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Graphs that can be used with the vehicles
from pegasus.simulator.parser import Parser
from pegasus.simulator.logic.graphs import ROS2Camera
class GraphParser(Parser):
    """Builds OmniGraph wrappers (e.g. the ROS 2 camera publisher) from configuration dictionaries."""

    def __init__(self):
        # Maps the type-name used in configuration files to the graph class to instantiate.
        self.graphs = {
            "ROS2 Camera": ROS2Camera
        }

    def parse(self, data_type: str, data_dict):
        """Instantiate the graph registered under ``data_type`` with ``data_dict``."""
        return self.graphs[data_type](data_dict)
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/ui/ui_window.py | """
| File: ui_window.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Definition of WidgetWindow which contains all the UI code that defines the extension GUI
"""
__all__ = ["WidgetWindow"]
# External packages
import numpy as np
# Omniverse general API
import carb
import omni.ui as ui
from omni.ui import color as cl
from pegasus.simulator.ui.ui_delegate import UIDelegate
from pegasus.simulator.params import ROBOTS, SIMULATION_ENVIRONMENTS, THUMBNAIL, WORLD_THUMBNAIL, WINDOW_TITLE
class WidgetWindow(ui.Window):
    """Widget window of the Pegasus Simulator extension.

    Builds every frame of the extension GUI (scene selection, vehicle
    selection and viewport camera) and forwards every user action to a
    :class:`UIDelegate` instance.
    """

    # Design constants for the widgets
    LABEL_PADDING = 120
    BUTTON_HEIGHT = 50
    GENERAL_SPACING = 5

    WINDOW_WIDTH = 325
    WINDOW_HEIGHT = 850

    BUTTON_SELECTED_STYLE = {
        "Button": {
            "background_color": 0xFF5555AA,
            "border_color": 0xFF5555AA,
            "border_width": 2,
            "border_radius": 5,
            "padding": 5,
        }
    }

    BUTTON_BASE_STYLE = {
        "Button": {
            "background_color": cl("#292929"),
            "border_color": cl("#292929"),
            "border_width": 2,
            "border_radius": 5,
            "padding": 5,
        }
    }

    def __init__(self, delegate: UIDelegate, **kwargs):
        """
        Constructor for the Window UI widget of the extension. Receives as input a UIDelegate that implements
        all the callbacks to handle button clicks, drop-down menu actions, etc. (abstracting the interface between
        the logic of the code and the ui)
        """
        # Setup the base widget window
        super().__init__(
            WINDOW_TITLE, width=WidgetWindow.WINDOW_WIDTH, height=WidgetWindow.WINDOW_HEIGHT, visible=True, **kwargs
        )
        self.deferred_dock_in("Property", ui.DockPolicy.CURRENT_WINDOW_IS_ACTIVE)

        # Setup the delegate that will bridge between the logic and the UI
        self._delegate = delegate

        # Bind the UI delegate to this window
        self._delegate.set_window_bind(self)

        # Auxiliar attributes for getting the transforms of the vehicle and the camera from the UI
        self._camera_transform_models = []
        self._vehicle_transform_models = []

        # Build the actual window UI
        self._build_window()

    def destroy(self):
        """Tear down the window, clearing the simulation world before the widgets are destroyed."""
        # Clear the world and the stage correctly
        self._delegate.on_clear_scene()

        # It will destroy all the children
        super().destroy()

    def _build_window(self):
        """Create the full widget hierarchy inside this window's frame."""
        # Define the UI of the widget window
        with self.frame:
            with ui.ScrollingFrame(horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON, vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON):

                # Vertical Stack of menus
                with ui.VStack():

                    # Create a frame for selecting which scene to load
                    self._scene_selection_frame()
                    ui.Spacer(height=5)

                    # Create a frame for selecting which vehicle to load in the simulation environment
                    self._robot_selection_frame()
                    ui.Spacer(height=5)

                    # Create a frame for selecting the camera position, and what it should point torwards to
                    self._viewport_camera_frame()
                    ui.Spacer()

    def _scene_selection_frame(self):
        """
        Method that implements a dropdown menu with the list of available simulation environemts for the vehicle
        """

        # Frame for selecting the simulation environment to load
        with ui.CollapsableFrame("Scene Selection"):
            with ui.VStack(height=0, spacing=10, name="frame_v_stack"):
                ui.Spacer(height=WidgetWindow.GENERAL_SPACING)

                # Iterate over all existing pre-made worlds bundled with this extension
                with ui.HStack():
                    ui.Label("World Assets", width=WidgetWindow.LABEL_PADDING, height=10.0)

                    # Combo box with the available environments to select from
                    dropdown_menu = ui.ComboBox(0, height=10, name="environments")
                    for environment in SIMULATION_ENVIRONMENTS:
                        dropdown_menu.model.append_child_item(None, ui.SimpleStringModel(environment))

                    # Allow the delegate to know which option was selected in the dropdown menu
                    self._delegate.set_scene_dropdown(dropdown_menu.model)

                ui.Spacer(height=0)

                # UI to configure the default latitude, longitude and altitude coordinates
                with ui.CollapsableFrame("Geographic Coordinates", collapsed=False):
                    with ui.VStack(height=0, spacing=10, name="frame_v_stack"):
                        with ui.HStack():
                            # Latitude
                            ui.Label("Latitude", name="label", width=WidgetWindow.LABEL_PADDING-50)
                            latitude_field = ui.FloatField(name="latitude", precision=6)
                            latitude_field.model.set_value(self._delegate._latitude)
                            self._delegate.set_latitude_field(latitude_field.model)
                            ui.Circle(name="transform", width=20, height=20, radius=3.5, size_policy=ui.CircleSizePolicy.FIXED)

                            # Longitude
                            ui.Label("Longitude", name="label", width=WidgetWindow.LABEL_PADDING-50)
                            longitude_field = ui.FloatField(name="longitude", precision=6)
                            longitude_field.model.set_value(self._delegate._longitude)
                            self._delegate.set_longitude_field(longitude_field.model)
                            ui.Circle(name="transform", width=20, height=20, radius=3.5, size_policy=ui.CircleSizePolicy.FIXED)

                            # Altitude
                            ui.Label("Altitude", name="label", width=WidgetWindow.LABEL_PADDING-50)
                            altitude_field = ui.FloatField(name="altitude", precision=6)
                            altitude_field.model.set_value(self._delegate._altitude)
                            self._delegate.set_altitude_field(altitude_field.model)
                            ui.Circle(name="transform", width=20, height=20, radius=3.5, size_policy=ui.CircleSizePolicy.FIXED)

                        with ui.HStack():
                            ui.Button("Set", enabled=True, clicked_fn=self._delegate.on_set_new_global_coordinates)
                            ui.Button("Reset", enabled=True, clicked_fn=self._delegate.on_reset_global_coordinates)
                            ui.Button("Make Default", enabled=True, clicked_fn=self._delegate.on_set_new_default_global_coordinates)

                ui.Spacer(height=0)
                with ui.HStack():
                    # Add a thumbnail image to have a preview of the world that is about to be loaded
                    with ui.ZStack(width=WidgetWindow.LABEL_PADDING, height=WidgetWindow.BUTTON_HEIGHT * 2):
                        ui.Rectangle()
                        ui.Image(
                            WORLD_THUMBNAIL,
                            fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT,
                            alignment=ui.Alignment.LEFT_CENTER,
                        )

                    ui.Spacer(width=WidgetWindow.GENERAL_SPACING)

                    with ui.VStack():
                        # Button for loading a desired scene
                        ui.Button(
                            "Load Scene",
                            height=WidgetWindow.BUTTON_HEIGHT,
                            clicked_fn=self._delegate.on_load_scene,
                            style=WidgetWindow.BUTTON_BASE_STYLE,
                        )

                        # Button to reset the stage
                        ui.Button(
                            "Clear Scene",
                            height=WidgetWindow.BUTTON_HEIGHT,
                            clicked_fn=self._delegate.on_clear_scene,
                            style=WidgetWindow.BUTTON_BASE_STYLE,
                        )

    def _robot_selection_frame(self):
        """
        Method that implements a frame that allows the user to choose which robot that is about to be spawned
        """

        # Auxiliary function to handle the "switch behaviour" of the buttons that are used to choose between a px4 or ROS2 backend
        def handle_px4_ros_switch(self, px4_button, ros2_button, button):

            # Handle the UI of both buttons switching of and on (To make it prettier)
            if button == "px4":
                px4_button.enabled = False
                ros2_button.enabled = True
                px4_button.set_style(WidgetWindow.BUTTON_SELECTED_STYLE)
                ros2_button.set_style(WidgetWindow.BUTTON_BASE_STYLE)
            else:
                px4_button.enabled = True
                ros2_button.enabled = False
                ros2_button.set_style(WidgetWindow.BUTTON_SELECTED_STYLE)
                px4_button.set_style(WidgetWindow.BUTTON_BASE_STYLE)

            # Handle the logic of switching between the two operating modes
            self._delegate.set_streaming_backend(button)

        # --------------------------
        # Function UI starts here
        # --------------------------

        # Frame for selecting the vehicle to load
        with ui.CollapsableFrame(title="Vehicle Selection"):
            with ui.VStack(height=0, spacing=10, name="frame_v_stack"):
                ui.Spacer(height=WidgetWindow.GENERAL_SPACING)

                # Iterate over all existing robots in the extension
                with ui.HStack():
                    ui.Label("Vehicle Model", name="label", width=WidgetWindow.LABEL_PADDING)

                    # Combo box with the available vehicles to select from
                    dropdown_menu = ui.ComboBox(0, height=10, name="robots")
                    for robot in ROBOTS:
                        dropdown_menu.model.append_child_item(None, ui.SimpleStringModel(robot))
                    self._delegate.set_vehicle_dropdown(dropdown_menu.model)

                with ui.HStack():
                    ui.Label("Vehicle ID", name="label", width=WidgetWindow.LABEL_PADDING)
                    vehicle_id_field = ui.IntField()
                    self._delegate.set_vehicle_id_field(vehicle_id_field.model)

                # Add a frame transform to select the position of where to place the selected robot in the world
                self._transform_frame()

                ui.Label("Streaming Backend")
                with ui.HStack():
                    # Add a thumbnail image to have a preview of the world that is about to be loaded
                    with ui.ZStack(width=WidgetWindow.LABEL_PADDING, height=WidgetWindow.BUTTON_HEIGHT * 2):
                        ui.Rectangle()
                        ui.Image(
                            THUMBNAIL, fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT, alignment=ui.Alignment.LEFT_CENTER
                        )

                    ui.Spacer(width=WidgetWindow.GENERAL_SPACING)

                    with ui.VStack():
                        # Buttons that behave like switches to choose which network interface to use to simulate the control of the vehicle
                        px4_button = ui.Button(
                            "PX4",
                            height=WidgetWindow.BUTTON_HEIGHT * 2,
                            style=WidgetWindow.BUTTON_SELECTED_STYLE,
                            enabled=False,
                        )
                        ros2_button = ui.Button(
                            "ROS 2",
                            height=WidgetWindow.BUTTON_HEIGHT,
                            style=WidgetWindow.BUTTON_BASE_STYLE,
                            enabled=True,
                            visible=False
                        )

                # Set the auxiliary function to handle the switch between both backends
                px4_button.set_clicked_fn(lambda: handle_px4_ros_switch(self, px4_button, ros2_button, "px4"))
                ros2_button.set_clicked_fn(lambda: handle_px4_ros_switch(self, px4_button, ros2_button, "ros"))

                # UI to configure the PX4 settings
                with ui.CollapsableFrame("PX4 Configurations", collapsed=False):
                    with ui.VStack(height=0, spacing=10, name="frame_v_stack"):
                        with ui.HStack():
                            ui.Label("Auto-launch PX4", name="label", width=WidgetWindow.LABEL_PADDING - 20)
                            px4_checkbox = ui.CheckBox()
                            px4_checkbox.model.set_value(self._delegate._autostart_px4)
                            self._delegate.set_px4_autostart_checkbox(px4_checkbox.model)

                        with ui.HStack():
                            ui.Label("PX4 Path", name="label", width=WidgetWindow.LABEL_PADDING - 20)
                            px4_path_field = ui.StringField(name="px4_path", width=300)
                            px4_path_field.model.set_value(self._delegate._px4_dir)
                            self._delegate.set_px4_directory_field(px4_path_field.model)
                            ui.Button("Reset", enabled=True, clicked_fn=self._delegate.on_reset_px4_path)
                            ui.Button("Make Default", enabled=True, clicked_fn=self._delegate.on_set_new_default_px4_path)

                        with ui.HStack():
                            ui.Label("PX4 airframe", name="label", width=WidgetWindow.LABEL_PADDING - 20)
                            px4_airframe_field = ui.StringField(name="px4_model")
                            px4_airframe_field.model.set_value(self._delegate._px4_airframe)
                            self._delegate.set_px4_airframe_field(px4_airframe_field.model)

                # Button to load the drone
                ui.Button(
                    "Load Vehicle",
                    height=WidgetWindow.BUTTON_HEIGHT,
                    clicked_fn=self._delegate.on_load_vehicle,
                    style=WidgetWindow.BUTTON_BASE_STYLE,
                )

    def _viewport_camera_frame(self):
        """
        Method that implements a frame that allows the user to choose what is the viewport camera pose easily
        """
        all_axis = ["X", "Y", "Z"]
        colors = {"X": 0xFF5555AA, "Y": 0xFF76A371, "Z": 0xFFA07D4F}
        default_values = [5.0, 5.0, 5.0]
        target_default_values = [0.0, 0.0, 0.0]

        # Frame for setting the camera to visualize the vehicle in the simulator viewport
        with ui.CollapsableFrame("Viewport Camera"):
            with ui.VStack(spacing=8):
                ui.Spacer(height=0)

                # Iterate over the position and rotation menus
                with ui.HStack():
                    with ui.HStack():
                        ui.Label("Position", name="transform", width=50, height=20)
                        ui.Spacer()

                    # Fields X, Y and Z
                    for axis, default_value in zip(all_axis, default_values):
                        with ui.HStack():
                            with ui.ZStack(width=15):
                                ui.Rectangle(
                                    width=15,
                                    height=20,
                                    style={
                                        "background_color": colors[axis],
                                        "border_radius": 3,
                                        "corner_flag": ui.CornerFlag.LEFT,
                                    },
                                )
                                ui.Label(axis, height=20, name="transform_label", alignment=ui.Alignment.CENTER)
                            float_drag = ui.FloatDrag(name="transform", min=-1000000, max=1000000, step=0.01)
                            float_drag.model.set_value(default_value)

                            # Save the model of each FloatDrag such that we can access its values later on
                            self._camera_transform_models.append(float_drag.model)
                            ui.Circle(
                                name="transform", width=20, height=20, radius=3.5, size_policy=ui.CircleSizePolicy.FIXED
                            )

                # Iterate over the position and rotation menus
                with ui.HStack():
                    with ui.HStack():
                        ui.Label("Target", name="transform", width=50, height=20)
                        ui.Spacer()

                    # Fields X, Y and Z
                    for axis, default_value in zip(all_axis, target_default_values):
                        with ui.HStack():
                            with ui.ZStack(width=15):
                                ui.Rectangle(
                                    width=15,
                                    height=20,
                                    style={
                                        "background_color": colors[axis],
                                        "border_radius": 3,
                                        "corner_flag": ui.CornerFlag.LEFT,
                                    },
                                )
                                ui.Label(axis, height=20, name="transform_label", alignment=ui.Alignment.CENTER)
                            float_drag = ui.FloatDrag(name="transform", min=-1000000, max=1000000, step=0.01)
                            float_drag.model.set_value(default_value)

                            # Save the model of each FloatDrag such that we can access its values later on
                            self._camera_transform_models.append(float_drag.model)
                            ui.Circle(
                                name="transform", width=20, height=20, radius=3.5, size_policy=ui.CircleSizePolicy.FIXED
                            )

                # Button to set the camera view
                ui.Button(
                    "Set Camera Pose",
                    height=WidgetWindow.BUTTON_HEIGHT,
                    clicked_fn=self._delegate.on_set_viewport_camera,
                    style=WidgetWindow.BUTTON_BASE_STYLE,
                )
                ui.Spacer()

    def _transform_frame(self):
        """
        Method that implements a transform frame to translate and rotate an object that is about to be spawned
        """
        components = ["Position", "Rotation"]
        all_axis = ["X", "Y", "Z"]
        colors = {"X": 0xFF5555AA, "Y": 0xFF76A371, "Z": 0xFFA07D4F}
        default_values = [0.0, 0.0, 0.1]

        with ui.CollapsableFrame("Position and Orientation"):
            with ui.VStack(spacing=8):
                ui.Spacer(height=0)

                # Iterate over the position and rotation menus
                for component in components:
                    with ui.HStack():
                        with ui.HStack():
                            ui.Label(component, name="transform", width=50)
                            ui.Spacer()

                        # Fields X, Y and Z
                        for axis, default_value in zip(all_axis, default_values):
                            with ui.HStack():
                                with ui.ZStack(width=15):
                                    ui.Rectangle(
                                        width=15,
                                        height=20,
                                        style={
                                            "background_color": colors[axis],
                                            "border_radius": 3,
                                            "corner_flag": ui.CornerFlag.LEFT,
                                        },
                                    )
                                    ui.Label(axis, name="transform_label", alignment=ui.Alignment.CENTER)
                                if component == "Position":
                                    float_drag = ui.FloatDrag(name="transform", min=-1000000, max=1000000, step=0.01)
                                    float_drag.model.set_value(default_value)
                                else:
                                    float_drag = ui.FloatDrag(name="transform", min=-180.0, max=180.0, step=0.01)

                                # Save the model of each FloatDrag such that we can access its values later on
                                self._vehicle_transform_models.append(float_drag.model)
                                ui.Circle(name="transform", width=20, radius=3.5, size_policy=ui.CircleSizePolicy.FIXED)
                ui.Spacer(height=0)

    # ------------------------------------------------------------------------------------------------
    # TODO - optimize the reading of values from the transform widget. This could be one function only
    # ------------------------------------------------------------------------------------------------
    def get_selected_vehicle_attitude(self):
        """Return the (position, orientation) arrays set in the vehicle transform widget, or (None, None)."""
        # Extract the vehicle desired position and orientation for spawning
        if len(self._vehicle_transform_models) == 6:
            vehicle_pos = np.array([self._vehicle_transform_models[i].get_value_as_float() for i in range(3)])
            vehicel_orientation = np.array(
                [self._vehicle_transform_models[i].get_value_as_float() for i in range(3, 6)]
            )
            return vehicle_pos, vehicel_orientation

        return None, None

    def get_selected_camera_pos(self):
        """
        Method that returns the currently selected camera position in the camera transform widget
        """
        # Extract the camera desired position and the target it is pointing to
        if len(self._camera_transform_models) == 6:
            camera_pos = np.array([self._camera_transform_models[i].get_value_as_float() for i in range(3)])
            camera_target = np.array([self._camera_transform_models[i].get_value_as_float() for i in range(3, 6)])
            return camera_pos, camera_target

        return None, None
| 22,406 | Python | 48.030634 | 169 | 0.512407 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/ui/ui_delegate.py | """
| File: ui_delegate.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Definition of the UiDelegate which is an abstraction layer betweeen the extension UI and code logic features
"""
# External packages
import os
import asyncio
from scipy.spatial.transform import Rotation
# Omniverse extensions
import carb
import omni.ui as ui
# Extension Configurations
from pegasus.simulator.params import ROBOTS, SIMULATION_ENVIRONMENTS
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
# Vehicle Manager to spawn Vehicles
from pegasus.simulator.logic.backends import MavlinkBackend, MavlinkBackendConfig #, ROS2Backend
from pegasus.simulator.logic.vehicles.multirotor import Multirotor, MultirotorConfig
from pegasus.simulator.logic.vehicle_manager import VehicleManager
class UIDelegate:
    """
    Object that will interface between the logic/dynamic simulation part of the extension and the Widget UI
    """

    def __init__(self):
        # The window that will be bound to this delegate
        self._window = None

        # Get an instance of the pegasus simulator (a singleton shared with the rest of the extension)
        self._pegasus_sim: PegasusInterface = PegasusInterface()

        # Attribute that holds the currently selected scene from the dropdown menu
        self._scene_dropdown: ui.AbstractItemModel = None
        self._scene_names = list(SIMULATION_ENVIRONMENTS.keys())

        # Selected latitude, longitude and altitude (UI models + cached values)
        self._latitude_field: ui.AbstractValueModel = None
        self._latitude = PegasusInterface().latitude
        self._longitude_field: ui.AbstractValueModel = None
        self._longitude = PegasusInterface().longitude
        self._altitude_field: ui.AbstractValueModel = None
        self._altitude = PegasusInterface().altitude

        # Attribute that holds the currently selected vehicle from the dropdown menu
        self._vehicle_dropdown: ui.AbstractItemModel = None
        self._vehicles_names = list(ROBOTS.keys())

        # Get an instance of the vehicle manager
        self._vehicle_manager = VehicleManager()

        # Selected option for broadcasting the simulated vehicle (PX4+ROS2 or just ROS2)
        # By default we assume PX4
        self._streaming_backend: str = "px4"

        # Selected value for the id of the vehicle
        self._vehicle_id_field: ui.AbstractValueModel = None
        self._vehicle_id: int = 0

        # Attribute that will save the model for the px4-autostart checkbox
        self._px4_autostart_checkbox: ui.AbstractValueModel = None
        self._autostart_px4: bool = True

        # Attributes to store the path for the Px4 directory
        self._px4_directory_field: ui.AbstractValueModel = None
        self._px4_dir: str = PegasusInterface().px4_path

        # Attributes to store the PX4 airframe
        self._px4_airframe_field: ui.AbstractValueModel = None
        self._px4_airframe: str = self._pegasus_sim.px4_default_airframe

    def set_window_bind(self, window):
        """Bind this delegate to the widget window it will read UI values from.

        Args:
            window: The extension widget window object.
        """
        self._window = window

    def set_scene_dropdown(self, scene_dropdown_model: ui.AbstractItemModel):
        """Save the UI model of the scene-selection dropdown."""
        self._scene_dropdown = scene_dropdown_model

    def set_latitude_field(self, latitude_model: ui.AbstractValueModel):
        """Save the UI model of the latitude input field."""
        self._latitude_field = latitude_model

    def set_longitude_field(self, longitude_model: ui.AbstractValueModel):
        """Save the UI model of the longitude input field."""
        self._longitude_field = longitude_model

    def set_altitude_field(self, altitude_model: ui.AbstractValueModel):
        """Save the UI model of the altitude input field."""
        self._altitude_field = altitude_model

    def set_vehicle_dropdown(self, vehicle_dropdown_model: ui.AbstractItemModel):
        """Save the UI model of the vehicle-selection dropdown."""
        self._vehicle_dropdown = vehicle_dropdown_model

    def set_vehicle_id_field(self, vehicle_id_field: ui.AbstractValueModel):
        """Save the UI model of the vehicle-id input field."""
        self._vehicle_id_field = vehicle_id_field

    def set_streaming_backend(self, backend: str = "px4"):
        """Record which backend ("px4" or "ros2") should be used to stream the vehicle state."""
        carb.log_info("Chosen option: " + backend)
        self._streaming_backend = backend

    def set_px4_autostart_checkbox(self, checkbox_model:ui.AbstractValueModel):
        """Save the UI model of the PX4 auto-start checkbox."""
        self._px4_autostart_checkbox = checkbox_model

    def set_px4_directory_field(self, directory_field_model: ui.AbstractValueModel):
        """Save the UI model of the PX4 installation-directory text field."""
        self._px4_directory_field = directory_field_model

    def set_px4_airframe_field(self, airframe_field_model: ui.AbstractValueModel):
        """Save the UI model of the PX4 airframe text field."""
        self._px4_airframe_field = airframe_field_model

    """
    ---------------------------------------------------------------------
    Callbacks to handle user interaction with the extension widget window
    ---------------------------------------------------------------------
    """

    def on_load_scene(self):
        """
        Method that should be invoked when the button to load the selected world is pressed
        """
        # Check if a scene is selected in the drop-down menu
        if self._scene_dropdown is not None:
            # Get the id of the selected environment from the list
            environemnt_index = self._scene_dropdown.get_item_value_model().as_int
            # Get the name of the selected world
            selected_world = self._scene_names[environemnt_index]
            # Try to spawn the selected world (async so the UI thread is not blocked while loading)
            asyncio.ensure_future(self._pegasus_sim.load_environment_async(SIMULATION_ENVIRONMENTS[selected_world], force_clear=True))

    def on_set_new_global_coordinates(self):
        """
        Method that gets invoked to set new global coordinates for this simulation
        """
        self._pegasus_sim.set_global_coordinates(
            self._latitude_field.get_value_as_float(),
            self._longitude_field.get_value_as_float(),
            self._altitude_field.get_value_as_float())

    def on_reset_global_coordinates(self):
        """
        Method that gets invoked to set the global coordinates to the defaults saved in the extension configuration file
        """
        self._pegasus_sim.set_default_global_coordinates()
        # Reflect the restored defaults back into the UI fields
        self._latitude_field.set_value(self._pegasus_sim.latitude)
        self._longitude_field.set_value(self._pegasus_sim.longitude)
        self._altitude_field.set_value(self._pegasus_sim.altitude)

    def on_set_new_default_global_coordinates(self):
        """
        Method that gets invoked to set new default global coordinates for this simulation. This will attempt
        to save the current coordinates as new defaults for the extension itself
        """
        self._pegasus_sim.set_new_default_global_coordinates(
            self._latitude_field.get_value_as_float(),
            self._longitude_field.get_value_as_float(),
            self._altitude_field.get_value_as_float()
        )

    def on_clear_scene(self):
        """
        Method that should be invoked when the clear world button is pressed
        """
        self._pegasus_sim.clear_scene()

    def on_load_vehicle(self):
        """
        Method that should be invoked when the button to load the selected vehicle is pressed
        """

        async def async_load_vehicle():
            # Check if we already have a physics environment activated. If not, then activate it
            # and only after spawn the vehicle. This is to avoid trying to spawn a vehicle without a physics
            # environment setup. This way we can even spawn a vehicle in an empty world and it won't care
            if hasattr(self._pegasus_sim.world, "_physics_context") == False:
                await self._pegasus_sim.world.initialize_simulation_context_async()

            # Check if a vehicle is selected in the drop-down menu
            if self._vehicle_dropdown is not None and self._window is not None:

                # Get the id of the selected vehicle from the list
                vehicle_index = self._vehicle_dropdown.get_item_value_model().as_int

                # Get the name of the selected vehicle
                selected_robot = self._vehicles_names[vehicle_index]

                # Get the id of the selected vehicle
                self._vehicle_id = self._vehicle_id_field.get_value_as_int()

                # Get the desired position and orientation of the vehicle from the UI transform
                pos, euler_angles = self._window.get_selected_vehicle_attitude()

                # Read if we should auto-start px4 from the checkbox
                px4_autostart = self._px4_autostart_checkbox.get_value_as_bool()

                # Read the PX4 path from the field (expanduser resolves a leading "~")
                px4_path = os.path.expanduser(self._px4_directory_field.get_value_as_string())

                # Read the PX4 airframe from the field
                px4_airframe = self._px4_airframe_field.get_value_as_string()

                # Create the multirotor configuration
                mavlink_config = MavlinkBackendConfig({
                    "vehicle_id": self._vehicle_id,
                    "px4_autolaunch": px4_autostart,
                    "px4_dir": px4_path,
                    "px4_vehicle_model": px4_airframe
                })
                config_multirotor = MultirotorConfig()
                config_multirotor.backends = [MavlinkBackend(mavlink_config)]
                #ros2 = ROS2Backend(self._vehicle_id)

                # Try to spawn the selected robot in the world to the specified namespace
                Multirotor(
                    "/World/quadrotor",
                    ROBOTS[selected_robot],
                    self._vehicle_id,
                    pos,
                    Rotation.from_euler("XYZ", euler_angles, degrees=True).as_quat(),
                    config=config_multirotor,
                )

                # Log that a vehicle of the type multirotor was spawned in the world via the extension UI
                carb.log_info("Spawned the robot: " + selected_robot + " using the Pegasus Simulator UI")
            else:
                # Log that it was not possible to spawn the vehicle in the world using the Pegasus Simulator UI
                carb.log_error("Could not spawn the robot using the Pegasus Simulator UI")

        # Run the actual vehicle spawn async so that the UI does not freeze
        asyncio.ensure_future(async_load_vehicle())

    def on_set_viewport_camera(self):
        """
        Method that should be invoked when the button to set the viewport camera pose is pressed
        """
        carb.log_warn("The viewport camera pose has been adjusted")
        if self._window:
            # Get the current camera position value
            camera_position, camera_target = self._window.get_selected_camera_pos()
            if camera_position is not None and camera_target is not None:
                # Set the camera view to a fixed value
                self._pegasus_sim.set_viewport_camera(eye=camera_position, target=camera_target)

    def on_set_new_default_px4_path(self):
        """
        Method that will try to update the new PX4 autopilot path with whatever is passed on the string field
        """
        carb.log_warn("A new default PX4 Path will be set for the extension.")
        # Read the current path from the field
        path = self._px4_directory_field.get_value_as_string()
        # Set the path using the pegasus interface
        self._pegasus_sim.set_px4_path(path)

    def on_reset_px4_path(self):
        """
        Method that will reset the string field to the default PX4 path
        """
        carb.log_warn("Reseting the path to the default one")
        self._px4_directory_field.set_value(self._pegasus_sim.px4_path)
| 11,596 | Python | 41.324817 | 134 | 0.643412 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/ui/__init__.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .ui_delegate import UIDelegate
from .ui_window import WidgetWindow
| 175 | Python | 24.142854 | 39 | 0.777143 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/vehicle_manager.py | """
| File: vehicle_manager.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Definition of the VehicleManager class - a singleton used to manage the vehiles that are spawned in the simulation world
"""
__all__ = ["VehicleManager"]
import carb
from threading import Lock
class VehicleManager:
    """The VehicleManager class is implemented following a singleton pattern. This means that no matter how many times
    an instance of the VehicleManager is requested (or a vehicle is spawned in the world), every caller shares the
    same single object.

    This class keeps track of all the vehicles that are spawned in the simulation world, either trough the extension UI
    or via Python script. Every time a new vehicle object is created, the 'add_vehicle' method is invoked. Additionally,
    a vehicle is removed, i.e. 'remove_vehicle' gets invoked, every time the '__del__' function of the "Vehicle" object
    gets invoked.
    """

    # The object instance of the Vehicle Manager (singleton)
    _instance = None
    _is_initialized = False

    # A dictionary of vehicles that are spawned in the simulator, keyed by their stage prefix
    _vehicles = {}

    # Lock for safe multi-threading when creating the singleton instance
    _lock: Lock = Lock()

    def __init__(self):
        """
        Constructor for the vehicle manager class.
        """
        pass

    """
    Properties
    """

    @property
    def vehicles(self):
        """
        Returns:
            (dict) Dictionary of the vehicles that were spawned, keyed by their stage prefix.
        """
        return VehicleManager._vehicles

    """
    Operations
    """

    @staticmethod
    def get_vehicle_manager():
        """
        Method that returns the current vehicle manager.
        """
        return VehicleManager()

    def add_vehicle(self, stage_prefix: str, vehicle):
        """
        Method that adds the vehicles to the vehicle manager.

        Args:
            stage_prefix (str): A string with the name that the vehicle is spawned in the simulator
            vehicle (Vehicle): The vehicle object being added to the vehicle manager.
        """
        VehicleManager._vehicles[stage_prefix] = vehicle

    def get_vehicle(self, stage_prefix: str):
        """Method that returns the vehicle object given its stage prefix. Returns None if there is no vehicle
        associated with that stage prefix

        Args:
            stage_prefix (str): A string with the name that the vehicle is spawned in the simulator

        Returns:
            Vehicle: The vehicle object associated with the stage_prefix
        """
        return VehicleManager._vehicles.get(stage_prefix, None)

    def remove_vehicle(self, stage_prefix: str):
        """
        Method that deletes a vehicle from the vehicle manager. Removing a stage prefix that is not
        registered is a no-op.

        Args:
            stage_prefix (str): A string with the name that the vehicle is spawned in the simulator.
        """
        # Use pop with a default instead of the previous bare 'except: pass', which would also have
        # silently swallowed unrelated errors (e.g. KeyboardInterrupt)
        VehicleManager._vehicles.pop(stage_prefix, None)

    def remove_all_vehicles(self):
        """
        Method that will delete all the vehicles that were spawned from the vehicle manager.
        """
        VehicleManager._vehicles.clear()

    def __new__(cls):
        """Method that allocated memory for a new vehicle_manager. Since the VehicleManager follows a singleton pattern,
        only one instance of VehicleManger object can be in memory at any time.

        Returns:
            VehicleManger: the single instance of the VehicleManager class.
        """

        # Use a lock in here to make sure we do not have a race condition
        # when using multi-threading and creating the first instance of the VehicleManager
        with cls._lock:
            if cls._instance is None:
                cls._instance = object.__new__(cls)
            else:
                carb.log_info("Vehicle Manager is defined already, returning the previously defined one")

            return VehicleManager._instance

    def __del__(self):
        """Destructor for the object"""
        VehicleManager._instance = None
        return
| 4,124 | Python | 31.738095 | 135 | 0.640883 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/state.py | """
| File: state.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Describes the state of a vehicle (or rigidbody).
"""
__all__ = ["State"]
import numpy as np
from scipy.spatial.transform import Rotation
from pegasus.simulator.logic.rotations import rot_ENU_to_NED, rot_FLU_to_FRD
class State:
    """
    Stores the state of a given vehicle.

    Note:
        - position - A numpy array with the [x,y,z] of the vehicle expressed in the inertial frame according to an ENU convention.
        - orientation - A numpy array with the quaternion [qx, qy, qz, qw] that encodes the attitude of the vehicle's FLU body frame, relative to an ENU inertial frame, expressed in the ENU inertial frame.
        - linear_velocity - A numpy array with [vx,vy,vz] that defines the velocity of the vehicle expressed in the inertial frame according to an ENU convention.
        - linear_body_velocity - A numpy array with [u,v,w] that defines the velocity of the vehicle expressed in the FLU body frame.
        - angular_velocity - A numpy array with [p,q,r] with the angular velocity of the vehicle's FLU body frame, relative to an ENU inertial frame, expressed in the FLU body frame.
        - linear acceleration - An array with [x_ddot, y_ddot, z_ddot] with the acceleration of the vehicle expressed in the inertial frame according to an ENU convention.
    """

    def __init__(self):
        """
        Initialize the State object
        """

        # The position [x,y,z] of the vehicle's body frame relative to the inertial frame, expressed in the inertial frame
        self.position = np.array([0.0, 0.0, 0.0])

        # The attitude (orientation) of the vehicle's body frame relative to the inertial frame of reference,
        # expressed in the inertial frame. This quaternion should follow the convention [qx, qy, qz, qw], such that "no rotation"
        # equates to the quaternion=[0, 0, 0, 1]
        self.attitude = np.array([0.0, 0.0, 0.0, 1.0])

        # The linear velocity [u,v,w] of the vehicle's body frame expressed in the body frame of reference
        self.linear_body_velocity = np.array([0.0, 0.0, 0.0])

        # The linear velocity [x_dot, y_dot, z_dot] of the vehicle's body frame expressed in the inertial frame of reference
        self.linear_velocity = np.array([0.0, 0.0, 0.0])

        # The angular velocity [wx, wy, wz] of the vehicle's body frame relative to the inertial frame, expressed in the body frame
        self.angular_velocity = np.array([0.0, 0.0, 0.0])

        # The linear acceleration [ax, ay, az] of the vehicle's body frame relative to the inertial frame, expressed in the inertial frame
        self.linear_acceleration = np.array([0.0, 0.0, 0.0])

    def get_position_ned(self):
        """
        Method that, assuming that a state is encoded in ENU standard (the Isaac Sim standard), converts the position
        to the NED convention used by PX4 and other onboard flight controllers

        Returns:
            np.ndarray: A numpy array with the [x,y,z] of the vehicle expressed in the inertial frame according to an NED convention.
        """
        return rot_ENU_to_NED.apply(self.position)

    def get_attitude_ned_frd(self):
        """
        Method that, assuming that a state is encoded in ENU-FLU standard (the Isaac Sim standard), converts the
        attitude of the vehicle it to the NED-FRD convention used by PX4 and other onboard flight controllers

        Returns:
            np.ndarray: A numpy array with the quaternion [qx, qy, qz, qw] that encodes the attitude of the vehicle's FRD body frame, relative to an NED inertial frame, expressed in the NED inertial frame.
        """
        attitude_frd_ned = rot_ENU_to_NED * Rotation.from_quat(self.attitude) * rot_FLU_to_FRD
        return attitude_frd_ned.as_quat()

    def get_linear_body_velocity_ned_frd(self):
        """
        Method that, assuming that a state is encoded in ENU-FLU standard (the Isaac Sim standard), converts the
        linear body velocity of the vehicle it to the NED-FRD convention used by PX4 and other onboard flight controllers

        Returns:
            np.ndarray: A numpy array with [u,v,w] that defines the velocity of the vehicle expressed in the FRD body frame.
        """
        # BUGFIX(review): the previous implementation rotated `self.linear_acceleration`, which contradicted
        # both the method name and the docstring. The body-frame linear velocity (FLU convention) is already
        # stored in `self.linear_body_velocity`, so only the FLU -> FRD conversion is required here.
        return rot_FLU_to_FRD.apply(self.linear_body_velocity)

    def get_linear_velocity_ned(self):
        """
        Method that, assuming that a state is encoded in ENU-FLU standard (the Isaac Sim standard), converts the
        linear velocity expressed in the inertial frame to the NED convention used by PX4 and other onboard flight
        controllers

        Returns:
            np.ndarray: A numpy array with [vx,vy,vz] that defines the velocity of the vehicle expressed in the inertial frame according to a NED convention.
        """
        return rot_ENU_to_NED.apply(self.linear_velocity)

    def get_angular_velocity_frd(self):
        """
        Method that, assuming that a state is encoded in ENU-FLU standard (the Isaac Sim standard), converts the
        angular velocity expressed in the body frame to the NED-FRD convention used by PX4 and other onboard flight
        controllers

        Returns:
            np.ndarray: A numpy array with [p,q,r] with the angular velocity of the vehicle's FRD body frame, relative to an NED inertial frame, expressed in the FRD body frame.
        """
        return rot_FLU_to_FRD.apply(self.angular_velocity)

    def get_linear_acceleration_ned(self):
        """
        Method that, assuming that a state is encoded in ENU-FLU standard (the Isaac Sim standard), converts the
        linear acceleration expressed in the inertial frame to the NED convention used by PX4 and other onboard flight
        controllers

        Returns:
            np.ndarray: An array with [x_ddot, y_ddot, z_ddot] with the acceleration of the vehicle expressed in the inertial frame according to an NED convention.
        """
        return rot_ENU_to_NED.apply(self.linear_acceleration)
| 6,384 | Python | 52.208333 | 205 | 0.684211 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/__init__.py | """
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
from .interface.pegasus_interface import PegasusInterface | 212 | Python | 34.499994 | 82 | 0.768868 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/rotations.py | """
| File: rotations.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Implements utilitary rotations between ENU and NED inertial frame conventions and FLU and FRD body frame conventions.
"""
import numpy as np
from scipy.spatial.transform import Rotation
# Quaternion for rotation between ENU and NED INERTIAL frames
# NED to ENU: +PI/2 rotation about Z (Down) followed by a +PI rotation around X (old North/new East)
# ENU to NED: +PI/2 rotation about Z (Up) followed by a +PI rotation about X (old East/new North)
# This rotation is symmetric, so q_ENU_to_NED == q_NED_to_ENU.
# Note: this quaternion follows the convention [qx, qy, qz, qw]
q_ENU_to_NED = np.array([0.70711, 0.70711, 0.0, 0.0])

# A scipy rotation from the ENU inertial frame to the NED inertial frame of reference
rot_ENU_to_NED = Rotation.from_quat(q_ENU_to_NED)

# Quaternion for rotation between body FLU and body FRD frames
# +PI rotation around X (Forward) axis rotates from Forward, Right, Down (aircraft)
# to Forward, Left, Up (base_link) frames and vice-versa.
# This rotation is symmetric, so q_FLU_to_FRD == q_FRD_to_FLU.
# Note: this quaternion follows the convention [qx, qy, qz, qw]
q_FLU_to_FRD = np.array([1.0, 0.0, 0.0, 0.0])

# A scipy rotation from the FLU body frame to the FRD body frame
rot_FLU_to_FRD = Rotation.from_quat(q_FLU_to_FRD)
| 1,447 | Python | 48.931033 | 132 | 0.735314 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/thrusters/__init__.py | """
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
from .thrust_curve import ThrustCurve
from .quadratic_thrust_curve import QuadraticThrustCurve | 249 | Python | 34.714281 | 82 | 0.779116 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/thrusters/quadratic_thrust_curve.py | """
| File: quadratic_thrust_curve.py
| Author: Marcelo Jacinto ([email protected])
| Description: File that implements a quadratic thrust curve for rotors
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
import numpy as np
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.thrusters.thrust_curve import ThrustCurve
class QuadraticThrustCurve(ThrustCurve):
    """Class that implements the dynamics of rotors that can be described by a quadratic thrust curve
    """

    def __init__(self, config={}):
        """Initialize the quadratic thrust curve from a (possibly partial) configuration dictionary.

        Args:
            config (dict): A Dictionary that contains all the parameters for configuring the QuadraticThrustCurve - it can be empty or only have some of the parameters used by the QuadraticThrustCurve.

        Examples:
            The dictionary default parameters are

            >>> {"num_rotors": 4,
            >>>  "rotor_constant": [8.54858e-6, 8.54858e-6, 8.54858e-6, 8.54858e-6],
            >>>  "rolling_moment_coefficient": [1e-6, 1e-6, 1e-6, 1e-6],
            >>>  "rot_dir": [-1, -1, 1, 1],
            >>>  "min_rotor_velocity": [0, 0, 0, 0],                    # rad/s
            >>>  "max_rotor_velocity": [1100, 1100, 1100, 1100],       # rad/s
            >>> }
        """
        # NOTE(review): the docstring above previously advertised a default rotor_constant of 5.84e-6
        # while the code default below is 8.54858e-6 - the docstring now matches the code.

        # Get the total number of rotors to simulate
        self._num_rotors = config.get("num_rotors", 4)

        # The rotor constant used for computing the total thrust produced by the rotor: T = rotor_constant * omega^2
        self._rotor_constant = config.get("rotor_constant", [8.54858e-6, 8.54858e-6, 8.54858e-6, 8.54858e-6])
        assert len(self._rotor_constant) == self._num_rotors

        # The rotor constant used for computing the total torque generated about the vehicle Z-axis
        self._rolling_moment_coefficient = config.get("rolling_moment_coefficient", [1e-6, 1e-6, 1e-6, 1e-6])
        assert len(self._rolling_moment_coefficient) == self._num_rotors

        # Save the rotor direction of rotation (-1 counter-clockwise, 1 clockwise)
        self._rot_dir = config.get("rot_dir", [-1, -1, 1, 1])
        assert len(self._rot_dir) == self._num_rotors

        # Values for the minimum and maximum rotor velocity in rad/s
        self.min_rotor_velocity = config.get("min_rotor_velocity", [0, 0, 0, 0])
        assert len(self.min_rotor_velocity) == self._num_rotors

        self.max_rotor_velocity = config.get("max_rotor_velocity", [1100, 1100, 1100, 1100])
        assert len(self.max_rotor_velocity) == self._num_rotors

        # The actual speed references to apply to the vehicle rotor joints
        self._input_reference = [0.0 for i in range(self._num_rotors)]

        # The actual velocity that each rotor is spinning at
        self._velocity = [0.0 for i in range(self._num_rotors)]

        # The actual force that each rotor is generating
        self._force = [0.0 for i in range(self._num_rotors)]

        # The actual rolling moment that is generated on the body frame of the vehicle
        self._rolling_moment = 0.0

    def set_input_reference(self, input_reference):
        """
        Receives as input a list of target angular velocities of each rotor in rad/s
        """
        # The target angular velocity of the rotor
        self._input_reference = input_reference

    def update(self, state: State, dt: float):
        """
        Note: the state and dt variables are not used in this implementation, but left
        to add support to other rotor models where the total thrust is dependent on
        states such as vehicle linear velocity

        Args:
            state (State): The current state of the vehicle.
            dt (float): The time elapsed between the previous and current function calls (s).

        Returns:
            (list, list, float): The force (N) per rotor, the angular velocity (rad/s) per rotor,
            and the total rolling moment (Nm) generated about the vehicle body Z-axis.
        """

        rolling_moment = 0.0

        # Compute the actual force to apply to the rotors and the rolling moment contribution
        for i in range(self._num_rotors):

            # Set the actual velocity that each rotor is spinning at (instantaneous model - no delay introduced)
            # Only apply clipping of the input reference to the feasible velocity range
            self._velocity[i] = np.clip(
                self._input_reference[i], self.min_rotor_velocity[i], self.max_rotor_velocity[i]
            )

            # Set the force using a quadratic thrust curve
            self._force[i] = self._rotor_constant[i] * np.power(self._velocity[i], 2)

            # Compute the rolling moment coefficient
            rolling_moment += self._rolling_moment_coefficient[i] * np.power(self._velocity[i], 2.0) * self._rot_dir[i]

        # Update the rolling moment variable
        self._rolling_moment = rolling_moment

        # Return the forces and velocities on each rotor and total torque applied on the body frame
        return self._force, self._velocity, self._rolling_moment

    @property
    def force(self):
        """The force to apply to each rotor of the vehicle at any given time instant

        Returns:
            list: A list of forces (in Newton N) to apply to each rotor of the vehicle (on its Z-axis) at any given time instant
        """
        return self._force

    @property
    def velocity(self):
        """The velocity at which each rotor of the vehicle should be rotating at any given time instant

        Returns:
            list: A list of angular velocities (in rad/s) of each rotor (about its Z-axis) at any given time instant
        """
        return self._velocity

    @property
    def rolling_moment(self):
        """The total rolling moment being generated on the body frame of the vehicle by the rotating propellers

        Returns:
            float: The total rolling moment to apply to the vehicle body frame (Torque about the Z-axis) in Nm
        """
        return self._rolling_moment

    @property
    def rot_dir(self):
        """The direction of rotation of each rotor of the vehicle

        Returns:
            list(int): A list with the rotation direction of each rotor (-1 is counter-clockwise and 1 for clockwise)
        """
        return self._rot_dir
| 6,097 | Python | 41.643356 | 201 | 0.632606 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/thrusters/thrust_curve.py | """
| File: thrust_curve.py
| Author: Marcelo Jacinto ([email protected])
| Description: File that implements the base interface for defining thrust curves for vehicles
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
from pegasus.simulator.logic.state import State
class ThrustCurve:
    """Interface base class for rotor thrust-curve models. All methods and properties below are
    no-op stubs meant to be overridden by concrete implementations (e.g. QuadraticThrustCurve).
    """

    def __init__(self):
        # No state is kept by the base interface
        pass

    def set_input_reference(self, input_reference):
        """
        Receives as input a list of target angular velocities of each rotor in rad/s
        """
        # To be overridden by concrete thrust-curve implementations
        pass

    def update(self, state: State, dt: float):
        """
        Note: the state and dt variables are not used in this implementation, but left
        to add support to other rotor models where the total thrust is dependent on
        states such as vehicle linear velocity

        Args:
            state (State): The current state of the vehicle.
            dt (float): The time elapsed between the previous and current function calls (s).
        """
        # To be overridden by concrete thrust-curve implementations
        pass

    @property
    def force(self):
        """The force to apply to each rotor of the vehicle at any given time instant

        Returns:
            list: A list of forces (in Newton N) to apply to each rotor of the vehicle (on its Z-axis) at any given time instant
        """
        pass

    @property
    def velocity(self):
        """The velocity at which each rotor of the vehicle should be rotating at any given time instant

        Returns:
            list: A list of angular velocities (in rad/s) of each rotor (about its Z-axis) at any given time instant
        """
        pass

    @property
    def rolling_moment(self):
        """The total rolling moment being generated on the body frame of the vehicle by the rotating propellers

        Returns:
            float: The total rolling moment to apply to the vehicle body frame (Torque about the Z-axis) in Nm
        """
        pass

    @property
    def rot_dir(self):
        """The direction of rotation of each rotor of the vehicle

        Returns:
            list(int): A list with the rotation direction of each rotor (-1 is counter-clockwise and 1 for clockwise)
        """
        pass
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/graphs/ros2_camera.py | """
| File: ros2_camera.py
| License: BSD-3-Clause. Copyright (c) 2023, Micah Nye. All rights reserved.
"""
__all__ = ["ROS2Camera"]
import carb
from omni.isaac.core.utils import stage
import omni.graph.core as og
from omni.isaac.core.utils.prims import is_prim_path_valid
from omni.isaac.core.utils.prims import set_targets
from pegasus.simulator.logic.graphs import Graph
from pegasus.simulator.logic.vehicles import Vehicle
import numpy as np
class ROS2Camera(Graph):
"""The class that implements the ROS2 Camera graph. This class inherits the base class Graph.
"""
def __init__(self, camera_prim_path: str, config: dict = {}):
"""Initialize the ROS2 Camera class
Args:
camera_prim_path (str): Path to the camera prim. Global path when it starts with `/`, else local to vehicle prim path
config (dict): A Dictionary that contains all the parameters for configuring the ROS2Camera - it can be empty or only have some of the parameters used by the ROS2Camera.
Examples:
The dictionary default parameters are
>>> {"graph_evaluator": "execution", # type of the omnigraph to create (execution, push)
>>> "resolution": [640, 480], # output video stream resolution in pixels [width, height]
>>> "types": ['rgb', 'camera_info'], # rgb, depth, depth_pcl, instance_segmentation, semantic_segmentation, bbox_2d_tight, bbox_2d_loose, bbox_3d, camera_info
>>> "publish_labels": True, # publish labels for instance_segmentation, semantic_segmentation, bbox_2d_tight, bbox_2d_loose and bbox_3d camera types
>>> "topic": "" # base topic name for the camera (default is camera name in Isaac Sim)
>>> "namespace": "" # namespace for the camera (default is vehicle name in Isaac Sim)
>>> "tf_frame_id": ""} # tf frame id for the camera (default is camera name in Isaac Sim)
"""
# Initialize the Super class "object" attribute
super().__init__(graph_type="ROS2Camera")
# Save camera path, frame id and ros topic name
self._camera_prim_path = camera_prim_path
self._frame_id = camera_prim_path.rpartition("/")[-1] # frame_id of the camera is the last prim path part after `/`
self._base_topic = config.get("topic", "")
self._namespace = config.get("namespace", "")
self._tf_frame_id = config.get("tf_frame_id", "")
# Process the config dictionary
self._graph_evaluator = config.get("graph_evaluator", "execution")
self._resolution = config.get("resolution", [640, 480])
self._types = np.array(config.get("types", ['rgb', 'camera_info']))
self._publish_labels = config.get("publish_labels", True)
def initialize(self, vehicle: Vehicle):
    """Method that initializes the graph of the camera.

    Builds an OmniGraph that creates a viewport for the camera prim, sets its
    resolution, attaches the camera to the render product, and adds one
    ROS2CameraHelper node per requested camera output type. The graph is
    evaluated once at the end so the ROS publishers get created immediately.

    Args:
        vehicle (Vehicle): The vehicle that this graph is attached to.
    """
    # Set the namespace for the camera if none is provided (defaults to the vehicle name)
    if self._namespace == "":
        self._namespace = f"/{vehicle.vehicle_name}"
    # Set the base topic for the camera if none is provided (defaults to the camera frame id)
    if self._base_topic == "":
        self._base_topic = f"/{self._frame_id}"
    # Set the tf frame id for the camera if none is provided (defaults to the camera frame id)
    if self._tf_frame_id == "":
        self._tf_frame_id = self._frame_id
    # Set the prim_path for the camera: paths that do not start with '/' are local to the vehicle prim
    if self._camera_prim_path[0] != '/':
        self._camera_prim_path = f"{vehicle.prim_path}/{self._camera_prim_path}"
    # Bail out early if the camera prim does not exist in the stage
    if not is_prim_path_valid(self._camera_prim_path):
        carb.log_error(f"Cannot create ROS2 Camera graph, the camera prim path \"{self._camera_prim_path}\" is not valid")
        return
    # Set the prim path for the publishing graph (sibling of the camera prim, "_pub" suffix)
    graph_path = f"{self._camera_prim_path}_pub"
    # Graph configuration: choose the OmniGraph evaluator requested in the config
    if self._graph_evaluator == "execution":
        graph_specs = {
            "graph_path": graph_path,
            "evaluator_name": "execution",
        }
    elif self._graph_evaluator == "push":
        graph_specs = {
            "graph_path": graph_path,
            "evaluator_name": "push",
            "pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_ONDEMAND,
        }
    else:
        carb.log_error(f"Cannot create ROS2 Camera graph, graph evaluator type \"{self._graph_evaluator}\" is not valid")
        return
    # Creating a graph edit configuration with cameraHelper nodes to generate ROS image publishers
    keys = og.Controller.Keys
    graph_config = {
        keys.CREATE_NODES: [
            ("on_tick", "omni.graph.action.OnTick"),
            ("create_viewport", "omni.isaac.core_nodes.IsaacCreateViewport"),
            ("get_render_product", "omni.isaac.core_nodes.IsaacGetViewportRenderProduct"),
            ("set_viewport_resolution", "omni.isaac.core_nodes.IsaacSetViewportResolution"),
            ("set_camera", "omni.isaac.core_nodes.IsaacSetCameraOnRenderProduct"),
        ],
        keys.CONNECT: [
            ("on_tick.outputs:tick", "create_viewport.inputs:execIn"),
            ("create_viewport.outputs:execOut", "get_render_product.inputs:execIn"),
            ("create_viewport.outputs:viewport", "get_render_product.inputs:viewport"),
            ("create_viewport.outputs:execOut", "set_viewport_resolution.inputs:execIn"),
            ("create_viewport.outputs:viewport", "set_viewport_resolution.inputs:viewport"),
            ("set_viewport_resolution.outputs:execOut", "set_camera.inputs:execIn"),
            ("get_render_product.outputs:renderProductPath", "set_camera.inputs:renderProductPath"),
        ],
        keys.SET_VALUES: [
            ("create_viewport.inputs:viewportId", 0),
            ("create_viewport.inputs:name", f"{self._namespace}/{self._frame_id}"),
            ("set_viewport_resolution.inputs:width", self._resolution[0]),
            ("set_viewport_resolution.inputs:height", self._resolution[1]),
        ],
    }
    # Add a cameraHelper node for each selected (and supported) camera type;
    # unsupported entries in self._types are silently skipped.
    valid_camera_type = False
    for camera_type in self._types:
        if not camera_type in ["rgb", "depth", "depth_pcl", "semantic_segmentation", "instance_segmentation", "bbox_2d_tight", "bbox_2d_loose", "bbox_3d", "camera_info"]:
            continue
        camera_helper_name = f"camera_helper_{camera_type}"
        graph_config[keys.CREATE_NODES] += [
            (camera_helper_name, "omni.isaac.ros2_bridge.ROS2CameraHelper")
        ]
        graph_config[keys.CONNECT] += [
            ("set_camera.outputs:execOut", f"{camera_helper_name}.inputs:execIn"),
            ("get_render_product.outputs:renderProductPath", f"{camera_helper_name}.inputs:renderProductPath")
        ]
        graph_config[keys.SET_VALUES] += [
            (f"{camera_helper_name}.inputs:nodeNamespace", self._namespace),
            (f"{camera_helper_name}.inputs:frameId", self._tf_frame_id),
            (f"{camera_helper_name}.inputs:topicName", f"{self._base_topic}/{camera_type}"),
            (f"{camera_helper_name}.inputs:type", camera_type)
        ]
        # Publish labels for specific camera types
        # NOTE(review): the labels topic configured here is prefixed with self._frame_id,
        # while camera_labels_topic() reports f"{namespace}{base_topic}/{type}_labels" —
        # these look inconsistent; confirm which topic name is actually intended.
        if self._publish_labels and camera_type in ["semantic_segmentation", "instance_segmentation", "bbox_2d_tight", "bbox_2d_loose", "bbox_3d"]:
            graph_config[keys.SET_VALUES] += [
                (camera_helper_name + ".inputs:enableSemanticLabels", True),
                (camera_helper_name + ".inputs:semanticLabelsTopicName", f"{self._frame_id}/{camera_type}_labels")
            ]
        valid_camera_type = True
    if not valid_camera_type:
        carb.log_error(f"Cannot create ROS2 Camera graph, no valid camera type was selected")
        return
    # Create the camera graph
    (graph, _, _, _) = og.Controller.edit(
        graph_specs,
        graph_config
    )
    # Connect the camera prim to the graph's set_camera node target
    set_targets(
        prim=stage.get_current_stage().GetPrimAtPath(f"{graph_path}/set_camera"),
        attribute="inputs:cameraPrim",
        target_prim_paths=[self._camera_prim_path]
    )
    # Run the ROS Camera graph once to generate ROS image publishers in SDGPipeline
    og.Controller.evaluate_sync(graph)
    # Also initialize the Super class with updated prim path (only camera graph path)
    super().initialize(graph_path)
def camera_topic(self, camera_type: str) -> str:
    """
    (str) Path to the camera topic.

    Args:
        camera_type (str): one of the supported camera output types

    Returns:
        Camera topic name (str) if the camera type exists, else empty string
    """
    # Unknown / unconfigured camera types map to the empty string.
    if camera_type not in self._types:
        return ""
    return f"{self._namespace}{self._base_topic}/{camera_type}"
def camera_labels_topic(self, camera_type: str) -> str:
    """
    (str) Path to the camera labels topic.

    Args:
        camera_type (str): one of the supported camera output types

    Returns:
        Camera labels topic name (str) if the camera type exists, else empty string
    """
    # Only these output types carry semantic label side-channels.
    labeled_types = ("semantic_segmentation", "instance_segmentation", "bbox_2d_tight", "bbox_2d_loose", "bbox_3d")
    if self._publish_labels and camera_type in self._types and camera_type in labeled_types:
        return f"{self._namespace}{self._base_topic}/{camera_type}_labels"
    return ""
| 9,941 | Python | 45.896226 | 181 | 0.5933 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/graphs/__init__.py | """
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto and Filip Stec. All rights reserved.
"""
from .graph import Graph
from .ros2_camera import ROS2Camera
| 168 | Python | 23.142854 | 97 | 0.738095 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/graphs/graph.py | """
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto and Filip Stec. All rights reserved.
"""
__all__ = ["Graph"]
class Graph:
    """Base class from which all OmniGraph wrappers in the package derive.

    A Graph only knows its descriptive type name (fixed at construction) and
    the prim path of the underlying OmniGraph, which subclasses supply by
    calling :meth:`initialize` once the graph has actually been created.

    Attributes:
        graph_prim_path
    """

    def __init__(self, graph_type: str):
        """Create a graph wrapper of the given type.

        Args:
            graph_type (str): A name that describes the type of graph
        """
        self._graph_type: str = graph_type
        # The prim path stays unset until a subclass builds the graph and
        # calls initialize() with the resulting path.
        self._graph_prim_path = None

    def initialize(self, graph_prim_path: str):
        """Record the prim path of the created graph.

        Should be called by the subclass once its OmniGraph exists in the stage.
        """
        self._graph_prim_path = graph_prim_path

    @property
    def graph_type(self) -> str:
        """(str) A name that describes the type of graph."""
        return self._graph_type

    @property
    def graph_prim_path(self) -> str:
        """(str) Path to the graph (None until initialize() is called)."""
        return self._graph_prim_path
| 1,011 | Python | 24.299999 | 97 | 0.568744 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/magnetometer.py | """
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Simulates a magnetometer. Based on the original implementation provided in PX4 stil_gazebo (https://github.com/PX4/PX4-SITL_gazebo) by Elia Tarasov
"""
__all__ = ["Magnetometer"]
import numpy as np
from scipy.spatial.transform import Rotation
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.sensors import Sensor
from pegasus.simulator.logic.rotations import rot_ENU_to_NED, rot_FLU_to_FRD
from pegasus.simulator.logic.sensors.geo_mag_utils import (
get_mag_declination,
get_mag_inclination,
get_mag_strength,
reprojection,
)
class Magnetometer(Sensor):
    """The class that implements a magnetometer sensor. This class inherits the base class Sensor."""

    def __init__(self, config=None):
        """Initialize the Magnetometer class

        Args:
            config (dict): A Dictionary that contains all the parameters for configuring the Magnetometer -
                it can be None, empty, or only have some of the parameters used by the Magnetometer.

        Examples:
            The dictionary default parameters are

            >>> {"noise_density": 0.4e-3,           # gauss / sqrt(hz)
            >>>  "random_walk": 6.4e-6,             # gauss * sqrt(hz)
            >>>  "bias_correlation_time": 6.0e2,    # s
            >>>  "update_rate": 250.0}              # Hz
        """
        # Use None as the default to avoid the shared mutable-default-argument pitfall.
        if config is None:
            config = {}
        # Initialize the Super class "object" attributes
        super().__init__(sensor_type="Magnetometer", update_rate=config.get("update_rate", 250.0))
        # Set the noise parameters
        self._bias: np.ndarray = np.array([0.0, 0.0, 0.0])
        self._noise_density = config.get("noise_density", 0.4e-3)  # gauss / sqrt(hz)
        self._random_walk = config.get("random_walk", 6.4e-6)  # gauss * sqrt(hz)
        self._bias_correlation_time = config.get("bias_correlation_time", 6.0e2)  # s
        # Initial state measured by the Magnetometer
        self._state = {"magnetic_field": np.zeros((3,))}

    @property
    def state(self):
        """
        (dict) The 'state' of the sensor, i.e. the data produced by the sensor at any given point in time
        """
        return self._state

    @Sensor.update_at_rate
    def update(self, state: State, dt: float):
        """Method that implements the logic of a magnetometer. In this method we start by computing the projection
        of the vehicle body frame such in the elipsoidal model of the earth in order to get its current latitude and
        longitude. From here the declination and inclination are computed and used to get the strength of the magnetic
        field, expressed in the inertial frame of reference (in ENU convention). This magnetic field is then rotated
        to the body frame such that it becomes expressed in a FRD body frame relative to a NED inertial reference frame.
        (The convention adopted by PX4). Random noise and bias are added to this magnetic field.

        Args:
            state (State): The current state of the vehicle.
            dt (float): The time elapsed between the previous and current function calls (s).

        Returns:
            (dict) A dictionary containing the current state of the sensor (the data produced by the sensor)
        """
        # Get the latitude and longitude from the current state
        latitude, longitude = reprojection(state.position, np.radians(self._origin_lat), np.radians(self._origin_lon))
        # Magnetic declination and inclination (radians)
        declination_rad: float = np.radians(get_mag_declination(np.degrees(latitude), np.degrees(longitude)))
        inclination_rad: float = np.radians(get_mag_inclination(np.degrees(latitude), np.degrees(longitude)))
        # Compute the magnetic strength (10^5 x nanoTesla)
        strength_ga: float = 0.01 * get_mag_strength(np.degrees(latitude), np.degrees(longitude))
        # Compute the Magnetic field components according to: http://geomag.nrcan.gc.ca/mag_fld/comp-en.php
        H: float = strength_ga * np.cos(inclination_rad)
        Z: float = np.tan(inclination_rad) * H
        X: float = H * np.cos(declination_rad)
        Y: float = H * np.sin(declination_rad)
        # Magnetic field of a body following a front-left-up (FLU) convention expressed in a East-North-Up (ENU) inertial frame
        magnetic_field_inertial: np.ndarray = np.array([X, Y, Z])
        # Rotate the magnetic field vector such that it expresses a field of a body frame according to the front-right-down (FRD)
        # expressed in a North-East-Down (NED) inertial frame (the standard used in magnetometer units)
        attitude_flu_enu = Rotation.from_quat(state.attitude)
        # Rotate the magnetic field from the inertial frame to the body frame of reference according to the FLU frame convention
        rot_body_to_world = rot_ENU_to_NED * attitude_flu_enu * rot_FLU_to_FRD.inv()
        # The magnetic field expressed in the body frame according to the front-right-down (FRD) convention
        magnetic_field_body = rot_body_to_world.inv().apply(magnetic_field_inertial)
        # -------------------------------
        # Add noise to the magnetic field
        # -------------------------------
        tau = self._bias_correlation_time
        # Discrete-time standard deviation equivalent to an "integrating" sampler with integration time dt.
        sigma_d: float = 1 / np.sqrt(dt) * self._noise_density
        sigma_b: float = self._random_walk
        # Compute exact covariance of the process after dt [Maybeck 4-114].
        sigma_b_d: float = np.sqrt(-sigma_b * sigma_b * tau / 2.0 * (np.exp(-2.0 * dt / tau) - 1.0))
        # Compute state-transition.
        phi_d: float = np.exp(-1.0 / tau * dt)
        # Add the noise to the magnetic field (first-order Gauss-Markov bias + white noise per axis)
        magnetic_field_noisy: np.ndarray = np.zeros((3,))
        for i in range(3):
            self._bias[i] = phi_d * self._bias[i] + sigma_b_d * np.random.randn()
            magnetic_field_noisy[i] = magnetic_field_body[i] + sigma_d * np.random.randn() + self._bias[i]
        # Add the values to the dictionary and return it
        self._state = {"magnetic_field": [magnetic_field_noisy[0], magnetic_field_noisy[1], magnetic_field_noisy[2]]}
        return self._state
| 6,384 | Python | 48.115384 | 185 | 0.649593 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/geo_mag_utils.py | """
| File: geo_mag_utils.py
| Description: Provides utilities for computing latitude, longitude, and magnetic strength
given the position of the vehicle in the simulated world. These computations and table constants are in agreement
with the PX4 stil_gazebo implementation (https://github.com/PX4/PX4-SITL_gazebo). Therefore, PX4 should behave similarly
to a gazebo-based simulation.
"""
import numpy as np
# Declare which functions are visible from this file
__all__ = ["get_mag_declination", "get_mag_inclination", "get_mag_strength", "reprojection", "GRAVITY_VECTOR"]
# --------------------------------------------------------------------
# Magnetic field data from WMM2018 (10^5xnanoTesla (N, E D) n-frame )
# --------------------------------------------------------------------
# Declination data in degrees
DECLINATION_TABLE = [
[ 47,46,45,43,42,41,39,37,33,29,23,16,10,4,-1,-6,-10,-15,-20,-27,-34,-42,-49,-56,-62,-67,-72,-74,-75,-73,-61,-22,26,42,47,48,47 ],
[ 31,31,31,30,30,30,30,29,27,24,18,11,3,-4,-9,-13,-15,-18,-21,-27,-33,-40,-47,-52,-56,-57,-56,-52,-44,-30,-14,2,14,22,27,30,31 ],
[ 22,23,23,23,22,22,22,23,22,19,13,5,-4,-12,-17,-20,-22,-22,-23,-25,-30,-36,-41,-45,-46,-44,-39,-31,-21,-11,-3,4,10,15,19,21,22 ],
[ 17,17,17,18,17,17,17,17,16,13,8,-1,-10,-18,-22,-25,-26,-25,-22,-20,-21,-25,-29,-32,-31,-28,-23,-16,-9,-3,0,4,7,11,14,16,17 ],
[ 13,13,14,14,14,13,13,12,11,9,3,-5,-14,-20,-24,-25,-24,-21,-17,-12,-9,-11,-14,-17,-18,-16,-12,-8,-3,-0,1,3,6,8,11,12,13 ],
[ 11,11,11,11,11,10,10,10,9,6,-0,-8,-15,-21,-23,-22,-19,-15,-10,-5,-2,-2,-4,-7,-9,-8,-7,-4,-1,1,1,2,4,7,9,10,11 ],
[ 10,9,9,9,9,9,9,8,7,3,-3,-10,-16,-20,-20,-18,-14,-9,-5,-2,1,2,0,-2,-4,-4,-3,-2,-0,0,0,1,3,5,7,9,10 ],
[ 9,9,9,9,9,9,9,8,6,1,-4,-11,-16,-18,-17,-14,-10,-5,-2,-0,2,3,2,0,-1,-2,-2,-1,-0,-1,-1,-1,1,3,6,8,9 ],
[ 8,9,9,10,10,10,10,8,5,0,-6,-12,-15,-16,-15,-11,-7,-4,-1,1,3,4,3,2,1,0,-0,-0,-1,-2,-3,-4,-2,0,3,6,8 ],
[ 7,9,10,11,12,12,12,9,5,-1,-7,-13,-15,-15,-13,-10,-6,-3,0,2,3,4,4,4,3,2,1,0,-1,-3,-5,-6,-6,-3,0,4,7 ],
[ 5,8,11,13,14,15,14,11,5,-2,-9,-15,-17,-16,-13,-10,-6,-3,0,3,4,5,6,6,6,5,4,2,-1,-5,-8,-9,-9,-6,-3,1,5 ],
[ 3,8,11,15,17,17,16,12,5,-4,-12,-18,-19,-18,-16,-12,-8,-4,-0,3,5,7,9,10,10,9,7,4,-1,-6,-10,-12,-12,-9,-5,-1,3 ],
[ 3,8,12,16,19,20,18,13,4,-8,-18,-24,-25,-23,-20,-16,-11,-6,-1,3,7,11,14,16,17,17,14,8,-0,-8,-13,-15,-14,-11,-7,-2,3 ]]
# Inclination data in degrees
INCLINATION_TABLE = [
[ -78,-76,-74,-72,-70,-68,-65,-63,-60,-57,-55,-54,-54,-55,-56,-57,-58,-59,-59,-59,-59,-60,-61,-63,-66,-69,-73,-76,-79,-83,-86,-87,-86,-84,-82,-80,-78 ],
[ -72,-70,-68,-66,-64,-62,-60,-57,-54,-51,-49,-48,-49,-51,-55,-58,-60,-61,-61,-61,-60,-60,-61,-63,-66,-69,-72,-76,-78,-80,-81,-80,-79,-77,-76,-74,-72 ],
[ -64,-62,-60,-59,-57,-55,-53,-50,-47,-44,-41,-41,-43,-47,-53,-58,-62,-65,-66,-65,-63,-62,-61,-63,-65,-68,-71,-73,-74,-74,-73,-72,-71,-70,-68,-66,-64 ],
[ -55,-53,-51,-49,-46,-44,-42,-40,-37,-33,-30,-30,-34,-41,-48,-55,-60,-65,-67,-68,-66,-63,-61,-61,-62,-64,-65,-66,-66,-65,-64,-63,-62,-61,-59,-57,-55 ],
[ -42,-40,-37,-35,-33,-30,-28,-25,-22,-18,-15,-16,-22,-31,-40,-48,-55,-59,-62,-63,-61,-58,-55,-53,-53,-54,-55,-55,-54,-53,-51,-51,-50,-49,-47,-45,-42 ],
[ -25,-22,-20,-17,-15,-12,-10,-7,-3,1,3,2,-5,-16,-27,-37,-44,-48,-50,-50,-48,-44,-41,-38,-38,-38,-39,-39,-38,-37,-36,-35,-35,-34,-31,-28,-25 ],
[ -5,-2,1,3,5,8,10,13,16,20,21,19,12,2,-10,-20,-27,-30,-30,-29,-27,-23,-19,-17,-17,-17,-18,-18,-17,-16,-16,-16,-16,-15,-12,-9,-5 ],
[ 15,18,21,22,24,26,29,31,34,36,37,34,28,20,10,2,-3,-5,-5,-4,-2,2,5,7,8,7,7,6,7,7,7,6,5,6,8,11,15 ],
[ 31,34,36,38,39,41,43,46,48,49,49,46,42,36,29,24,20,19,20,21,23,25,28,30,30,30,29,29,29,29,28,27,25,25,26,28,31 ],
[ 43,45,47,49,51,53,55,57,58,59,59,56,53,49,45,42,40,40,40,41,43,44,46,47,47,47,47,47,47,47,46,44,42,41,40,42,43 ],
[ 53,54,56,57,59,61,64,66,67,68,67,65,62,60,57,55,55,54,55,56,57,58,59,59,60,60,60,60,60,60,59,57,55,53,52,52,53 ],
[ 62,63,64,65,67,69,71,73,75,75,74,73,70,68,67,66,65,65,65,66,66,67,68,68,69,70,70,71,71,70,69,67,65,63,62,62,62 ],
[ 71,71,72,73,75,77,78,80,81,81,80,79,77,76,74,73,73,73,73,73,73,74,74,75,76,77,78,78,78,78,77,75,73,72,71,71,71 ]]
# Strength data in centi-Tesla
STRENGTH_TABLE = [
[ 62,60,58,56,54,52,49,46,43,41,38,36,34,32,31,31,30,30,30,31,33,35,38,42,46,51,55,59,62,64,66,67,67,66,65,64,62 ],
[ 59,56,54,52,50,47,44,41,38,35,32,29,28,27,26,26,26,25,25,26,28,30,34,39,44,49,54,58,61,64,65,66,65,64,63,61,59 ],
[ 54,52,49,47,45,42,40,37,34,30,27,25,24,24,24,24,24,24,24,24,25,28,32,37,42,48,52,56,59,61,62,62,62,60,59,56,54 ],
[ 49,47,44,42,40,37,35,33,30,28,25,23,22,23,23,24,25,25,26,26,26,28,31,36,41,46,51,54,56,57,57,57,56,55,53,51,49 ],
[ 43,41,39,37,35,33,32,30,28,26,25,23,23,23,24,25,26,28,29,29,29,30,32,36,40,44,48,51,52,52,51,51,50,49,47,45,43 ],
[ 38,36,35,33,32,31,30,29,28,27,26,25,24,24,25,26,28,30,31,32,32,32,33,35,38,42,44,46,47,46,45,45,44,43,41,40,38 ],
[ 34,33,32,32,31,31,31,30,30,30,29,28,27,27,27,28,29,31,32,33,33,33,34,35,37,39,41,42,43,42,41,40,39,38,36,35,34 ],
[ 33,33,32,32,33,33,34,34,35,35,34,33,32,31,30,30,31,32,33,34,35,35,36,37,38,40,41,42,42,41,40,39,37,36,34,33,33 ],
[ 34,34,34,35,36,37,39,40,41,41,40,39,37,35,35,34,35,35,36,37,38,39,40,41,42,43,44,45,45,45,43,41,39,37,35,34,34 ],
[ 37,37,38,39,41,42,44,46,47,47,46,45,43,41,40,39,39,40,41,41,42,43,45,46,47,48,49,50,50,50,48,46,43,41,39,38,37 ],
[ 42,42,43,44,46,48,50,52,53,53,52,51,49,47,45,45,44,44,45,46,46,47,48,50,51,53,54,55,56,55,54,52,49,46,44,43,42 ],
[ 48,48,49,50,52,53,55,56,57,57,56,55,53,51,50,49,48,48,48,49,49,50,51,53,55,56,58,59,60,60,58,56,54,52,50,49,48 ],
[ 54,54,54,55,56,57,58,58,59,58,58,57,56,54,53,52,51,51,51,51,52,53,54,55,57,58,60,61,62,61,61,59,58,56,55,54,54 ]]
# Sampling grid of the WMM2018 lookup tables above
SAMPLING_RES = 10.0  # table grid resolution (degrees per cell)
SAMPLING_MIN_LAT = -60  # deg
SAMPLING_MAX_LAT = 60  # deg
SAMPLING_MIN_LON = -180  # deg
SAMPLING_MAX_LON = 180  # deg
# NOTE(review): this value matches the PX4 SITL gazebo code; it differs from
# the usual Earth mean radius (~6371000 m) — kept as-is for PX4 parity.
EARTH_RADIUS = 6353000.0  # meters
# Gravity vector expressed in ENU
GRAVITY_VECTOR = np.array([0.0, 0.0, -9.80665])  # m/s^2
def get_lookup_table_index(val: int, min: int, max: int):
    """Convert a coordinate value into a row/column index of the WMM sampling tables.

    Note: the parameter names `min`/`max` shadow the Python builtins; they are
    kept unchanged for interface compatibility with existing callers.
    """
    # Clamp to [min, max - SAMPLING_RES] so that bilinear interpolation can
    # always read the neighbouring (index + 1) entry without running past the
    # table bounds, even when `val` sits exactly on the upper edge.
    clamped = np.clip(val, min, max - SAMPLING_RES)
    return int((clamped - min) / SAMPLING_RES)
def get_table_data(lat: float, lon: float, table):
    """Sample `table` at (lat, lon), in degrees, using bilinear interpolation.

    Returns 0.0 for coordinates outside the valid lat/lon range, since there
    is no meaningful "closest" value to fall back on.
    """
    if lat < -90.0 or lat > 90.0 or lon < -180.0 or lon > 180.0:
        return 0.0
    # Snap to the sampling lattice. int() truncates toward zero (matches the
    # PX4 SITL code); the clips on the fractions below absorb the resulting
    # offset for negative coordinates.
    lat_floor = int(lat / SAMPLING_RES) * SAMPLING_RES
    lon_floor = int(lon / SAMPLING_RES) * SAMPLING_RES
    # Indices of the lower-left lattice sample
    row = get_lookup_table_index(lat_floor, SAMPLING_MIN_LAT, SAMPLING_MAX_LAT)
    col = get_lookup_table_index(lon_floor, SAMPLING_MIN_LON, SAMPLING_MAX_LON)
    # The four surrounding lattice samples: south-west, south-east, north-east, north-west
    sw = table[row][col]
    se = table[row][col + 1]
    ne = table[row + 1][col + 1]
    nw = table[row + 1][col]
    # Interpolate along longitude on both rows, then along latitude
    lat_frac = np.clip((lat - lat_floor) / SAMPLING_RES, 0.0, 1.0)
    lon_frac = np.clip((lon - lon_floor) / SAMPLING_RES, 0.0, 1.0)
    south = lon_frac * (se - sw) + sw
    north = lon_frac * (ne - nw) + nw
    return lat_frac * (north - south) + south
def get_mag_declination(latitude: float, longitude: float):
    """Magnetic declination (degrees) at (latitude, longitude) in degrees, interpolated from the WMM2018 table."""
    return get_table_data(latitude, longitude, DECLINATION_TABLE)

def get_mag_inclination(latitude: float, longitude: float):
    """Magnetic inclination (degrees) at (latitude, longitude) in degrees, interpolated from the WMM2018 table."""
    return get_table_data(latitude, longitude, INCLINATION_TABLE)

def get_mag_strength(latitude: float, longitude: float):
    """Magnetic field strength at (latitude, longitude) in degrees, in the units of STRENGTH_TABLE (labelled centi-Tesla in the table header above)."""
    return get_table_data(latitude, longitude, STRENGTH_TABLE)
def reprojection(position: np.ndarray, origin_lat=-999, origin_long=-999):
    """
    Compute the latitude and longitude coordinates from a local position.

    Args:
        position (np.ndarray): Local position in meters; position[0] is treated
            as east and position[1] as north (ENU convention).
        origin_lat: Latitude of the world origin, in radians (the Magnetometer
            sensor passes np.radians(deg)). Defaults to the -999 "unset" sentinel.
        origin_long: Longitude of the world origin, in radians. Same sentinel.

    Returns:
        (latitude_rad, longitude_rad): reprojected coordinates in radians.
    """
    # reproject local position to gps coordinates — appears to be the inverse
    # azimuthal-equidistant projection, matching the PX4 SITL gazebo code.
    x_rad: float = position[1] / EARTH_RADIUS  # north
    y_rad: float = position[0] / EARTH_RADIUS  # east
    # Angular distance from the origin on the sphere
    c: float = np.sqrt(x_rad * x_rad + y_rad * y_rad)
    sin_c: float = np.sin(c)
    cos_c: float = np.cos(c)
    # The inverse projection is singular at the origin (c == 0), so return the
    # origin coordinates directly in that case.
    if c != 0.0:
        latitude_rad = np.arcsin(cos_c * np.sin(origin_lat) + (x_rad * sin_c * np.cos(origin_lat)) / c)
        longitude_rad = origin_long + np.arctan2(y_rad * sin_c, c * np.cos(origin_lat) * cos_c - x_rad * np.sin(origin_lat) * sin_c)
    else:
        latitude_rad = origin_lat
        longitude_rad = origin_long
    return latitude_rad, longitude_rad
| 8,992 | Python | 58.953333 | 156 | 0.590747 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/sensor.py | """
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Definition of the Sensor class which is used as the base for all the sensors.
"""
__all__ = ["Sensor"]
from pegasus.simulator.logic.state import State
class Sensor:
    """The base class for implementing a sensor

    Attributes:
        update_period (float): The period for each sensor update: update_period = 1 / update_rate (in s).
        origin_lat (float): The latitude of the origin of the world in degrees (might get used by some sensors).
        origin_lon (float): The longitude of the origin of the world in degrees (might get used by some sensors).
        origin_alt (float): The altitude of the origin of the world relative to sea water level (might get used by some sensors)
    """

    def __init__(self, sensor_type: str, update_rate: float):
        """Initialize the Sensor class

        Args:
            sensor_type (str): A name that describes the type of sensor
            update_rate (float): The rate at which the data in the sensor should be refreshed (in Hz)
        """
        # Set the sensor type and update rate
        self._sensor_type = sensor_type
        self._update_rate = update_rate
        # NOTE: assumes update_rate > 0 — a zero rate would raise ZeroDivisionError here.
        self._update_period = 1.0 / self._update_rate
        # Auxiliary variables used to control whether to update the sensor or not given the time elapsed
        self._first_update = True
        self._total_time = 0.0
        # Set the "configuration of the world" - some sensors might need it.
        # -999 acts as a "not initialized" sentinel until initialize() is called.
        self._origin_lat = -999
        self._origin_lon = -999
        self._origin_alt = 0.0

    def initialize(self, origin_lat, origin_lon, origin_alt):
        """Method that initializes the sensor latitude, longitude and altitude attributes.

        Note:
            Given that some sensors require the knowledge of the latitude, longitude and altitude of the [0, 0, 0] coordinate
            of the world, then we might as well just save this information for whatever sensor that comes

        Args:
            origin_lat (float): The latitude of the origin of the world in degrees (might get used by some sensors).
            origin_lon (float): The longitude of the origin of the world in degrees (might get used by some sensors).
            origin_alt (float): The altitude of the origin of the world relative to sea water level (might get used by some sensors).
        """
        self._origin_lat = origin_lat
        self._origin_lon = origin_lon
        self._origin_alt = origin_alt

    def set_update_rate(self, update_rate: float):
        """Method that changes the update rate and period of the sensor

        Args:
            update_rate (float): The new rate at which the data in the sensor should be refreshed (in Hz)
        """
        self._update_rate = update_rate
        self._update_period = 1.0 / self._update_rate

    def update_at_rate(fnc):
        """Decorator function used to check if the time elapsed between the last sensor update call and the current
        sensor update call is higher than the defined update_rate of the sensor. If so, we need to actually compute new
        values to simulate a measurement of the sensor at a given rate.

        Note:
            Defined without @staticmethod on purpose: subclasses access it as
            ``@Sensor.update_at_rate`` at class-definition time, which in Python 3
            yields the plain function, so it works directly as a decorator.

        Args:
            fnc (function): The function that we want to enforce a specific update rate.

        Examples:
            >>> class GPS(Sensor):
            >>>    @Sensor.update_at_rate
            >>>    def update(self):
            >>>        (do some logic here)

        Returns:
            [None, Dict]: This decorator function returns None if there was no data to be produced by the sensor at the
            specified timestamp or a dict with the current state of the sensor otherwise.
        """
        # Define a wrapper function so that the "self" of the object can be passed to the function as well
        def wrapper(self, state: State, dt: float):
            # Add the total time passed between the last time the sensor was updated and the current call
            self._total_time += dt
            # If it is time to update the sensor data, then just call the update function of the sensor
            # (the first call always produces a measurement so consumers get an initial value)
            if self._total_time >= self._update_period or self._first_update:
                # Result of the update function for the sensor
                result = fnc(self, state, self._total_time)
                # Reset the auxiliary counter variables
                self._first_update = False
                self._total_time = 0.0
                return result
            return None
        return wrapper

    @property
    def sensor_type(self):
        """
        (str) A name that describes the type of sensor.
        """
        return self._sensor_type

    @property
    def update_rate(self):
        """
        (float) The rate at which the data in the sensor should be refreshed (in Hz).
        """
        return self._update_rate

    @property
    def state(self):
        """
        (dict) A dictionary which contains the data produced by the sensor at any given time.
        Base implementation returns None; subclasses override this property.
        """
        return None

    def update(self, state: State, dt: float):
        """Method that should be implemented by the class that inherits Sensor. This is where the actual implementation
        of the sensor should be performed.

        Args:
            state (State): The current state of the vehicle.
            dt (float): The time elapsed between the previous and current function calls (s).

        Returns:
            (dict) A dictionary containing the current state of the sensor (the data produced by the sensor)
        """
        pass

    def config_from_dict(self, config_dict):
        """Method that should be implemented by the class that inherits Sensor. This is where the configuration of the
        sensor based on a dictionary input should be performed.

        Args:
            config_dict (dict): A dictionary containing the configurations of the sensor
        """
        pass
| 6,081 | Python | 39.818792 | 133 | 0.627693 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/barometer.py | """
| File: barometer.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Simulates a barometer. Based on the implementation provided in PX4 stil_gazebo (https://github.com/PX4/PX4-SITL_gazebo) by Elia Tarasov.
| References: Both the original implementation provided in the gazebo based simulation and this one are based on the following article - 'A brief summary of atmospheric modeling', Cavcar, M., http://fisicaatmo.at.fcen.uba.ar/practicas/ISAweb.pdf
"""
__all__ = ["Barometer"]
import numpy as np
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.sensors import Sensor
from pegasus.simulator.logic.sensors.geo_mag_utils import GRAVITY_VECTOR
DEFAULT_HOME_ALT_AMSL = 488.0
class Barometer(Sensor):
    """The class that implements a barometer sensor. This class inherits the base class Sensor."""

    def __init__(self, config=None):
        """Initialize the Barometer class

        Args:
            config (dict): A Dictionary that contains all the parameters for configuring the Barometer -
                it can be None, empty, or only have some of the parameters used by the Barometer.

        Examples:
            The dictionary default parameters are

            >>> {"temperature_msl": 288.15,           # temperature at MSL [K] (15 [C])
            >>>  "pressure_msl": 101325.0,            # pressure at MSL [Pa]
            >>>  "lapse_rate": 0.0065,                # reduction in temperature with altitude for troposphere [K/m]
            >>>  "air_density_msl": 1.225,            # air density at MSL [kg/m^3]
            >>>  "absolute_zero": -273.15,            # [C]
            >>>  "drift_pa_per_sec": 0.0,             # Pa
            >>>  "update_rate": 250.0}                # Hz
        """
        # Use None as the default to avoid the shared mutable-default-argument pitfall.
        if config is None:
            config = {}
        # Initialize the Super class "object" attributes
        super().__init__(sensor_type="Barometer", update_rate=config.get("update_rate", 250.0))
        # Altitude of the vehicle on the first update; used afterwards to compute relative altitude.
        self._z_start: float = None
        # Setup the default home altitude (aka the altitude at the [0.0, 0.0, 0.0] coordinate on the simulated world)
        # If desired, the user can override this default by calling the initialize() method defined inside the Sensor
        # implementation
        self._origin_alt = DEFAULT_HOME_ALT_AMSL
        # Define the constants for the barometer
        # International standard atmosphere (troposphere model - valid up to 11km) see [1]
        self._TEMPERATURE_MSL: float = config.get("temperature_msl", 288.15)  # temperature at MSL [K] (15 [C])
        self._PRESSURE_MSL: float = config.get("pressure_msl", 101325.0)  # pressure at MSL [Pa]
        self._LAPSE_RATE: float = config.get(
            "lapse_rate", 0.0065
        )  # reduction in temperature with altitude for troposphere [K/m]
        self._AIR_DENSITY_MSL: float = config.get("air_density_msl", 1.225)  # air density at MSL [kg/m^3]
        self._ABSOLUTE_ZERO_C: float = config.get("absolute_zero", -273.15)  # [C]
        # Set the drift for the sensor
        self._baro_drift_pa_per_sec: float = config.get("drift_pa_per_sec", 0.0)
        # Auxiliary variables for generating the noise
        self._baro_rnd_use_last: bool = False
        self._baro_rnd_y2: float = 0.0
        self._baro_drift_pa: float = 0.0
        # Save the current state measured by the Barometer
        self._state = {"absolute_pressure": 0.0, "pressure_altitude": 0.0, "temperature": 0.0}

    @property
    def state(self):
        """
        (dict) The 'state' of the sensor, i.e. the data produced by the sensor at any given point in time
        """
        return self._state

    @Sensor.update_at_rate
    def update(self, state: State, dt: float):
        """Method that implements the logic of a barometer. In this method we compute the relative altitude of the vehicle
        relative to the origin's altitude. Aditionally, we compute the actual altitude of the vehicle, local temperature and
        absolute presure, based on the reference - [A brief summary of atmospheric modeling, Cavcar, M., http://fisicaatmo.at.fcen.uba.ar/practicas/ISAweb.pdf]

        Args:
            state (State): The current state of the vehicle.
            dt (float): The time elapsed between the previous and current function calls (s).

        Returns:
            (dict) A dictionary containing the current state of the sensor (the data produced by the sensor)
        """
        # Set the initial altitude if not yet defined
        if self._z_start is None:
            self._z_start = state.position[2]
        # Compute the temperature at the current altitude
        alt_rel: float = state.position[2] - self._z_start
        alt_amsl: float = self._origin_alt + alt_rel
        temperature_local: float = self._TEMPERATURE_MSL - self._LAPSE_RATE * alt_amsl
        # Compute the absolute pressure at local temperature
        pressure_ratio: float = np.power(self._TEMPERATURE_MSL / temperature_local, 5.2561)
        absolute_pressure: float = self._PRESSURE_MSL / pressure_ratio
        # Generate a Gaussian noise sequence using the polar form of the Box-Muller transformation.
        # BUGFIX: the polar Box-Muller method requires *uniform* samples in [-1, 1);
        # the previous code fed np.random.randn() (normal samples) into the transform,
        # which does not yield the intended unit-variance Gaussian noise.
        # Honestly, this is overkill and could be replaced by a single np.random.randn() call.
        if not self._baro_rnd_use_last:
            w: float = 1.0
            # Also reject w == 0 to avoid log(0) below.
            while w >= 1.0 or w == 0.0:
                x1: float = 2.0 * np.random.rand() - 1.0
                x2: float = 2.0 * np.random.rand() - 1.0
                w = (x1 * x1) + (x2 * x2)
            w = np.sqrt((-2.0 * np.log(w)) / w)
            y1: float = x1 * w
            # The transform yields two independent samples; cache the second for the next call.
            self._baro_rnd_y2 = x2 * w
            self._baro_rnd_use_last = True
        else:
            y1: float = self._baro_rnd_y2
            self._baro_rnd_use_last = False
        # Apply noise and drift
        abs_pressure_noise: float = y1  # 1 Pa RMS noise
        self._baro_drift_pa = self._baro_drift_pa + (self._baro_drift_pa_per_sec * dt)  # Update the drift
        # BUGFIX: add the *accumulated* drift (Pa), not the drift rate (Pa/s), to the
        # pressure measurement. This also makes the pressure consistent with the drift
        # term used in the pressure-altitude computation below.
        absolute_pressure_noisy: float = absolute_pressure + abs_pressure_noise + self._baro_drift_pa
        # Convert to hPa (Note: 1 hPa = 100 Pa)
        absolute_pressure_noisy_hpa: float = absolute_pressure_noisy * 0.01
        # Compute air density at local temperature
        density_ratio: float = np.power(self._TEMPERATURE_MSL / temperature_local, 4.256)
        air_density: float = self._AIR_DENSITY_MSL / density_ratio
        # Compute pressure altitude including effect of pressure noise
        pressure_altitude: float = alt_amsl - (abs_pressure_noise + self._baro_drift_pa) / (np.linalg.norm(GRAVITY_VECTOR) * air_density)
        # Compute temperature in celsius
        temperature_celsius: float = temperature_local + self._ABSOLUTE_ZERO_C
        # Add the values to the dictionary and return it
        self._state = {
            "absolute_pressure": absolute_pressure_noisy_hpa,
            "pressure_altitude": pressure_altitude,
            "temperature": temperature_celsius,
        }
        return self._state
| 7,189 | Python | 46.615894 | 245 | 0.626095 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/gps.py | """
| File: gps.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Simulates a gps. Based on the implementation provided in PX4 stil_gazebo (https://github.com/PX4/PX4-SITL_gazebo) by Amy Wagoner and Nuno Marques
"""
__all__ = ["GPS"]
import numpy as np
from pegasus.simulator.logic.sensors import Sensor
from pegasus.simulator.logic.sensors.geo_mag_utils import reprojection
# TODO - Introduce delay on the GPS data
class GPS(Sensor):
    """The class that implements a GPS sensor. This class inherits the base class Sensor."""

    def __init__(self, config={}):
        """Initialize the GPS class.

        Args:
            config (dict): A Dictionary that contains all the parameters for configuring the GPS - it can be empty
                or only have some of the parameters used by the GPS. (The dict is only read from, so the shared
                mutable default is harmless here and kept for consistency with the other sensors.)

        Examples:
            The dictionary default parameters are

            >>> {"fix_type": 3,
            >>>  "eph": 1.0,
            >>>  "epv": 1.0,
            >>>  "sattelites_visible": 10,
            >>>  "gps_xy_random_walk": 2.0, # (m/s) / sqrt(hz)
            >>>  "gps_z_random_walk": 4.0, # (m/s) / sqrt(hz)
            >>>  "gps_xy_noise_density": 2.0e-4, # (m) / sqrt(hz)
            >>>  "gps_z_noise_density": 4.0e-4, # (m) / sqrt(hz)
            >>>  "gps_vxy_noise_density": 0.2, # (m/s) / sqrt(hz)
            >>>  "gps_vz_noise_density": 0.4, # (m/s) / sqrt(hz)
            >>>  "gps_correlation_time": 60, # s
            >>>  "update_rate": 1.0 # Hz
            >>> }
        """
        # Initialize the Super class "object" attributes
        super().__init__(sensor_type="GPS", update_rate=config.get("update_rate", 250.0))

        # Define the GPS simulated/fixed values
        self._fix_type = config.get("fix_type", 3)
        self._eph = config.get("eph", 1.0)
        self._epv = config.get("epv", 1.0)
        # NOTE: the 'sattelites' spelling is kept on purpose - it is the key published in the
        # state dictionary and downstream consumers rely on it.
        self._sattelites_visible = config.get("sattelites_visible", 10)

        # Parameters for GPS random walk
        self._random_walk_gps = np.array([0.0, 0.0, 0.0])
        self._gps_xy_random_walk = config.get("gps_xy_random_walk", 2.0)  # (m/s) / sqrt(hz)
        self._gps_z_random_walk = config.get("gps_z_random_walk", 4.0)  # (m/s) / sqrt(hz)

        # Parameters for the position noise
        self._noise_gps_pos = np.array([0.0, 0.0, 0.0])
        self._gps_xy_noise_density = config.get("gps_xy_noise_density", 2.0e-4)  # (m) / sqrt(hz)
        self._gps_z_noise_density = config.get("gps_z_noise_density", 4.0e-4)  # (m) / sqrt(hz)

        # Parameters for the velocity noise
        self._noise_gps_vel = np.array([0.0, 0.0, 0.0])
        self._gps_vxy_noise_density = config.get("gps_vxy_noise_density", 0.2)  # (m/s) / sqrt(hz)
        self._gps_vz_noise_density = config.get("gps_vz_noise_density", 0.4)  # (m/s) / sqrt(hz)

        # Parameters for the GPS bias
        self._gps_bias = np.array([0.0, 0.0, 0.0])
        self._gps_correlation_time = config.get("gps_correlation_time", 60)

        # Save the current state measured by the GPS (and initialize at the origin).
        # NOTE(review): self._origin_lat/_origin_lon/_origin_alt are assumed to be provided by
        # the Sensor base class (not visible in this file) - confirm against its definition.
        # BUG FIX: "eph"/"epv" were listed twice (first the literal 1.0, then the configured
        # values). In a dict literal the later key wins, so the 1.0 entries were dead code.
        self._state = {
            "latitude": np.radians(self._origin_lat),
            "longitude": np.radians(self._origin_lon),
            "altitude": self._origin_alt,
            "speed": 0.0,
            "velocity_north": 0.0,
            "velocity_east": 0.0,
            "velocity_down": 0.0,
            # Constant values
            "fix_type": self._fix_type,
            "eph": self._eph,
            "epv": self._epv,
            "cog": 0.0,
            "sattelites_visible": self._sattelites_visible,
            "latitude_gt": np.radians(self._origin_lat),
            "longitude_gt": np.radians(self._origin_lon),
            "altitude_gt": self._origin_alt,
        }

    @property
    def state(self):
        """
        (dict) The 'state' of the sensor, i.e. the data produced by the sensor at any given point in time
        """
        return self._state

    @Sensor.update_at_rate
    def update(self, state: np.ndarray, dt: float):
        """Method that implements the logic of a gps. In this method we start by generating the GPS bias terms which are then
        added to the real position of the vehicle, expressed in ENU inertial frame. This position affected by noise
        is reprojected in order to obtain the corresponding latitude and longitude. Additionally, noise is added to the
        linear velocity.

        Args:
            state (State): The current state of the vehicle.
            dt (float): The time elapsed between the previous and current function calls (s).

        Returns:
            (dict) A dictionary containing the current state of the sensor (the data produced by the sensor)
        """
        # Update noise parameters (white noise scaled for the elapsed interval)
        self._random_walk_gps[0] = self._gps_xy_random_walk * np.sqrt(dt) * np.random.randn()
        self._random_walk_gps[1] = self._gps_xy_random_walk * np.sqrt(dt) * np.random.randn()
        self._random_walk_gps[2] = self._gps_z_random_walk * np.sqrt(dt) * np.random.randn()

        self._noise_gps_pos[0] = self._gps_xy_noise_density * np.sqrt(dt) * np.random.randn()
        self._noise_gps_pos[1] = self._gps_xy_noise_density * np.sqrt(dt) * np.random.randn()
        self._noise_gps_pos[2] = self._gps_z_noise_density * np.sqrt(dt) * np.random.randn()

        self._noise_gps_vel[0] = self._gps_vxy_noise_density * np.sqrt(dt) * np.random.randn()
        self._noise_gps_vel[1] = self._gps_vxy_noise_density * np.sqrt(dt) * np.random.randn()
        self._noise_gps_vel[2] = self._gps_vz_noise_density * np.sqrt(dt) * np.random.randn()

        # Perform GPS bias integration (using euler integration -> to be improved)
        self._gps_bias[0] = (
            self._gps_bias[0] + self._random_walk_gps[0] * dt - self._gps_bias[0] / self._gps_correlation_time
        )
        self._gps_bias[1] = (
            self._gps_bias[1] + self._random_walk_gps[1] * dt - self._gps_bias[1] / self._gps_correlation_time
        )
        self._gps_bias[2] = (
            self._gps_bias[2] + self._random_walk_gps[2] * dt - self._gps_bias[2] / self._gps_correlation_time
        )

        # reproject position with noise into geographic coordinates
        pos_with_noise: np.ndarray = state.position + self._noise_gps_pos + self._gps_bias
        latitude, longitude = reprojection(pos_with_noise, np.radians(self._origin_lat), np.radians(self._origin_lon))

        # Compute the values of the latitude and longitude without noise (for groundtruth measurements)
        latitude_gt, longitude_gt = reprojection(
            state.position, np.radians(self._origin_lat), np.radians(self._origin_lon)
        )

        # Add noise to the velocity expressed in the world frame (noise currently disabled)
        velocity: np.ndarray = state.linear_velocity  # + self._noise_gps_vel

        # Compute the xy speed
        speed: float = np.linalg.norm(velocity[:2])

        # Course over ground (NOT heading, but direction of movement),
        # 0.0..359.99 degrees. If unknown, set to: 65535 [cdeg] (type:uint16_t)
        # (computed but deliberately not published below - "cog" stays 0.0)
        ve = velocity[0]
        vn = velocity[1]
        cog = np.degrees(np.arctan2(ve, vn))
        if cog < 0.0:
            cog = cog + 360.0
        cog = cog * 100

        # Add the values to the dictionary and return it.
        # BUG FIX: removed the dead duplicate "eph"/"epv" keys (the literal 1.0 values were
        # always shadowed by the configured self._eph/self._epv entries below).
        self._state = {
            "latitude": np.degrees(latitude),
            "longitude": np.degrees(longitude),
            # NOTE(review): the vertical noise is *subtracted* here while pos_with_noise adds
            # it above - confirm the intended sign convention.
            "altitude": state.position[2] + self._origin_alt - self._noise_gps_pos[2] + self._gps_bias[2],
            "speed": speed,
            # Conversion from ENU (standard of Isaac Sim to NED - used in GPS sensors)
            "velocity_north": velocity[1],
            "velocity_east": velocity[0],
            "velocity_down": -velocity[2],
            # Constant values
            "fix_type": self._fix_type,
            "eph": self._eph,
            "epv": self._epv,
            "cog": 0.0,  # cog,
            "sattelites_visible": self._sattelites_visible,
            "latitude_gt": latitude_gt,
            "longitude_gt": longitude_gt,
            "altitude_gt": state.position[2] + self._origin_alt,
        }

        return self._state
| 8,406 | Python | 43.481481 | 167 | 0.571259 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/imu.py | """
| File: imu.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Simulates an imu. Based on the implementation provided in PX4 stil_gazebo (https://github.com/PX4/PX4-SITL_gazebo)
"""
__all__ = ["IMU"]
import numpy as np
from scipy.spatial.transform import Rotation
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.sensors import Sensor
from pegasus.simulator.logic.rotations import rot_FLU_to_FRD, rot_ENU_to_NED
from pegasus.simulator.logic.sensors.geo_mag_utils import GRAVITY_VECTOR
class IMU(Sensor):
    """The class that implements the IMU sensor. This class inherits the base class Sensor."""

    def __init__(self, config={}):
        """Initialize the IMU class

        Args:
            config (dict): A Dictionary that contains all the parameters for configuring the IMU - it can be empty
                or only have some of the parameters used by the IMU. (The dict is only read from, so the shared
                mutable default is harmless here and kept for consistency with the other sensors.)

        Examples:
            The dictionary default parameters are

            >>> {"gyroscope": {
            >>>        "noise_density": 2.0 * 35.0 / 3600.0 / 180.0 * pi,
            >>>        "random_walk": 2.0 * 4.0 / 3600.0 / 180.0 * pi,
            >>>        "bias_correlation_time": 1.0e3,
            >>>        "turn_on_bias_sigma": 0.5 / 180.0 * pi},
            >>>  "accelerometer": {
            >>>        "noise_density": 2.0 * 2.0e-3,
            >>>        "random_walk": 2.0 * 3.0e-3,
            >>>        "bias_correlation_time": 300.0,
            >>>        "turn_on_bias_sigma": 20.0e-3 * 9.8
            >>>    },
            >>>  "update_rate": 1.0}    # Hz
        """
        # Initialize the Super class "object" attributes
        super().__init__(sensor_type="IMU", update_rate=config.get("update_rate", 250.0))

        # Orientation noise constant
        self._orientation_noise: float = 0.0

        # Gyroscope noise constants
        self._gyroscope_bias: np.ndarray = np.zeros((3,))
        gyroscope_config = config.get("gyroscope", {})
        self._gyroscope_noise_density = gyroscope_config.get("noise_density", 0.0003393695767766752)
        self._gyroscope_random_walk = gyroscope_config.get("random_walk", 3.878509448876288E-05)
        self._gyroscope_bias_correlation_time = gyroscope_config.get("bias_correlation_time", 1.0E3)
        self._gyroscope_turn_on_bias_sigma = gyroscope_config.get("turn_on_bias_sigma", 0.008726646259971648)

        # Accelerometer noise constants
        self._accelerometer_bias: np.ndarray = np.zeros((3,))
        accelerometer_config = config.get("accelerometer", {})
        self._accelerometer_noise_density = accelerometer_config.get("noise_density", 0.004)
        self._accelerometer_random_walk = accelerometer_config.get("random_walk", 0.006)
        self._accelerometer_bias_correlation_time = accelerometer_config.get("bias_correlation_time", 300.0)
        self._accelerometer_turn_on_bias_sigma = accelerometer_config.get("turn_on_bias_sigma", 0.196)

        # Auxiliary variable used to compute the linear acceleration of the vehicle by finite differences
        self._prev_linear_velocity = np.zeros((3,))

        # Save the current state measured by the IMU
        self._state = {
            "orientation": np.array([1.0, 0.0, 0.0, 0.0]),
            "angular_velocity": np.array([0.0, 0.0, 0.0]),
            "linear_acceleration": np.array([0.0, 0.0, 0.0]),
        }

    @property
    def state(self):
        """
        (dict) The 'state' of the sensor, i.e. the data produced by the sensor at any given point in time
        """
        return self._state

    @Sensor.update_at_rate
    def update(self, state: State, dt: float):
        """Method that implements the logic of an IMU. In this method we start by generating the random walk of the
        gyroscope. This value is then added to the real angular velocity of the vehicle (FLU relative to ENU inertial frame
        expressed in FLU body frame). The same logic is followed for the accelerometer and the accelerations. After this step,
        the angular velocity is rotated such that it is expressed in a FRD body frame, relative to a NED inertial frame,
        expressed in the FRD body frame. Additionally, the acceleration is also rotated, such that it becomes expressed in
        the body FRD frame of the vehicle. This sensor outputs data that follows the PX4 adopted standard.

        Args:
            state (State): The current state of the vehicle.
            dt (float): The time elapsed between the previous and current function calls (s).

        Returns:
            (dict) A dictionary containing the current state of the sensor (the data produced by the sensor)
        """
        # Gyroscope terms
        # BUG FIX: tau_g previously used self._accelerometer_bias_correlation_time (a
        # copy-paste from the accelerometer section below); the gyroscope bias must decay
        # with its own correlation time.
        tau_g: float = self._gyroscope_bias_correlation_time

        # Discrete-time standard deviation equivalent to an "integrating" sampler with integration time dt
        sigma_g_d: float = 1 / np.sqrt(dt) * self._gyroscope_noise_density
        sigma_b_g: float = self._gyroscope_random_walk

        # Compute exact covariance of the process after dt [Maybeck 4-114]
        sigma_b_g_d: float = np.sqrt(-sigma_b_g * sigma_b_g * tau_g / 2.0 * (np.exp(-2.0 * dt / tau_g) - 1.0))

        # Compute state-transition
        phi_g_d: float = np.exp(-1.0 / tau_g * dt)

        # Simulate gyroscope noise processes and add them to the true angular rate.
        angular_velocity: np.ndarray = np.zeros((3,))
        for i in range(3):
            self._gyroscope_bias[i] = phi_g_d * self._gyroscope_bias[i] + sigma_b_g_d * np.random.randn()
            angular_velocity[i] = state.angular_velocity[i] + sigma_g_d * np.random.randn() + self._gyroscope_bias[i]

        # Accelerometer terms
        tau_a: float = self._accelerometer_bias_correlation_time

        # Discrete-time standard deviation equivalent to an "integrating" sampler with integration time dt
        sigma_a_d: float = 1.0 / np.sqrt(dt) * self._accelerometer_noise_density
        sigma_b_a: float = self._accelerometer_random_walk

        # Compute exact covariance of the process after dt [Maybeck 4-114].
        sigma_b_a_d: float = np.sqrt(-sigma_b_a * sigma_b_a * tau_a / 2.0 * (np.exp(-2.0 * dt / tau_a) - 1.0))

        # Compute state-transition.
        phi_a_d: float = np.exp(-1.0 / tau_a * dt)

        # Compute the linear acceleration from differentiating the velocity of the vehicle expressed in the inertial frame
        linear_acceleration_inertial = (state.linear_velocity - self._prev_linear_velocity) / dt
        linear_acceleration_inertial = linear_acceleration_inertial - GRAVITY_VECTOR

        # Update the previous linear velocity for the next computation
        self._prev_linear_velocity = state.linear_velocity

        # Compute the linear acceleration of the body frame, with respect to the inertial frame, expressed in the body frame
        linear_acceleration = np.array(Rotation.from_quat(state.attitude).inv().apply(linear_acceleration_inertial))

        # Simulate the accelerometer noise processes and add them to the true linear acceleration values
        for i in range(3):
            # BUG FIX: the bias random walk must be driven by Gaussian noise (randn); the
            # original used np.random.rand(), i.e. uniform noise on [0, 1), which biases the
            # walk upwards. (The bias is currently excluded from the output - see below.)
            self._accelerometer_bias[i] = phi_a_d * self._accelerometer_bias[i] + sigma_b_a_d * np.random.randn()
            linear_acceleration[i] = (
                linear_acceleration[i] + sigma_a_d * np.random.randn()
            ) #+ self._accelerometer_bias[i]

        # TODO - Add small "noisy" to the attitude

        # --------------------------------------------------------------------------------------------
        # Apply rotations such that we express the IMU data according to the FRD body frame convention
        # --------------------------------------------------------------------------------------------

        # Convert the orientation to the FRD-NED standard
        attitude_flu_enu = Rotation.from_quat(state.attitude)
        attitude_frd_enu = attitude_flu_enu * rot_FLU_to_FRD
        attitude_frd_ned = rot_ENU_to_NED * attitude_frd_enu

        # Convert the angular velocity from FLU to FRD standard
        angular_velocity_frd = rot_FLU_to_FRD.apply(angular_velocity)

        # Convert the linear acceleration in the body frame from FLU to FRD standard
        linear_acceleration_frd = rot_FLU_to_FRD.apply(linear_acceleration)

        # Add the values to the dictionary and return it
        self._state = {
            "orientation": attitude_frd_ned.as_quat(),
            "angular_velocity": angular_velocity_frd,
            "linear_acceleration": linear_acceleration_frd,
        }

        return self._state
| 8,652 | Python | 48.445714 | 167 | 0.624942 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/interface/pegasus_interface.py | """
| File: pegasus_interface.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Definition of the PegasusInterface class (a singleton) that is used to manage the Pegasus framework.
"""
__all__ = ["PegasusInterface"]
# Importing Lock in order to have a multithread-safe Pegasus singleton that manages the entire Pegasus extension
import gc
import yaml
import asyncio
import os
from threading import Lock
# NVidia API imports
import carb
import omni.kit.app
from omni.isaac.core.world import World
from omni.isaac.core.utils.stage import clear_stage, create_new_stage_async, update_stage_async
from omni.isaac.core.utils.viewports import set_camera_view
import omni.isaac.core.utils.nucleus as nucleus
# Pegasus Simulator internal API
from pegasus.simulator.params import DEFAULT_WORLD_SETTINGS, SIMULATION_ENVIRONMENTS, CONFIG_FILE
from pegasus.simulator.logic.vehicle_manager import VehicleManager
class PegasusInterface:
"""
PegasusInterface is a singleton class (there is only one object instance at any given time) that will be used
to
"""
# The object instance of the Vehicle Manager
_instance = None
_is_initialized = False
# Lock for safe multi-threading
_lock: Lock = Lock()
    def __init__(self):
        """
        Initialize the PegasusInterface singleton object (only runs once at a time)
        """
        # If we already have an instance of the PegasusInterface, do not overwrite it!
        # (Since __new__ always returns the same object, __init__ re-runs on every
        # PegasusInterface() call - this guard makes it effectively run once.)
        if PegasusInterface._is_initialized:
            return
        carb.log_info("Initializing the Pegasus Simulator Extension")
        PegasusInterface._is_initialized = True
        # Get a handle to the vehicle manager instance which will manage which vehicles are spawned in the world
        # to be controlled and simulated
        self._vehicle_manager = VehicleManager()
        # Initialize the world with the default simulation settings.
        # NOTE(review): this aliases the module-level DEFAULT_WORLD_SETTINGS dict (no copy),
        # so set_world_settings() mutates the shared default - confirm this is intended.
        self._world_settings = DEFAULT_WORLD_SETTINGS
        # The World object is created lazily (initialize_world / load_environment_async)
        self._world = None
        #self.initialize_world()
        # Initialize the latitude, longitude and altitude of the simulated environment at the (0.0, 0.0, 0.0) coordinate
        # from the extension configuration file
        self._latitude, self._longitude, self._altitude = self._get_global_coordinates_from_config()
        # Get the px4_path from the extension configuration file
        self._px4_path: str = self._get_px4_path_from_config()
        self._px4_default_airframe: str = self._get_px4_default_airframe_from_config()
        carb.log_info("Default PX4 path:" + str(self._px4_path))
    @property
    def world(self):
        """The current omni.isaac.core.world World instance

        Returns:
            omni.isaac.core.world: The world instance. None until initialize_world() or
            load_environment_async(force_clear=True) has created one.
        """
        return self._world
    @property
    def vehicle_manager(self):
        """The instance of the VehicleManager.

        Returns:
            VehicleManager: The current instance of the VehicleManager.
        """
        # Created once in __init__ when the singleton is first initialized
        return self._vehicle_manager
    @property
    def latitude(self):
        """The latitude of the origin of the simulated world in degrees.

        Read from the extension configuration file at startup; changed via
        set_global_coordinates().

        Returns:
            float: The latitude of the origin of the simulated world in degrees.
        """
        return self._latitude
    @property
    def longitude(self):
        """The longitude of the origin of the simulated world in degrees.

        Read from the extension configuration file at startup; changed via
        set_global_coordinates().

        Returns:
            float: The longitude of the origin of the simulated world in degrees.
        """
        return self._longitude
    @property
    def altitude(self):
        """The altitude of the origin of the simulated world in meters.

        Returns:
            float: The altitude of the origin of the simulated world in meters.
        """
        return self._altitude
    @property
    def px4_path(self):
        """A string with the installation directory for PX4 (if it was setup). Otherwise it is an empty string.

        Returns:
            str: A string with the installation directory for PX4 (if it was setup). Otherwise an empty string.
        """
        return self._px4_path
    @property
    def px4_default_airframe(self):
        """A string with the PX4 default airframe (if it was setup). Otherwise it is an empty string.

        Returns:
            str: A string with the PX4 default airframe (if it was setup). Otherwise an empty string.
        """
        return self._px4_default_airframe
def set_global_coordinates(self, latitude=None, longitude=None, altitude=None):
"""Method that can be used to set the latitude, longitude and altitude of the simulation world at the origin.
Args:
latitude (float): The latitude of the origin of the simulated world in degrees. Defaults to None.
longitude (float): The longitude of the origin of the simulated world in degrees. Defaults to None.
altitude (float): The altitude of the origin of the simulated world in meters. Defaults to None.
"""
if latitude is not None:
self._latitude = latitude
if longitude is not None:
self._longitude = longitude
if self.altitude is not None:
self._altitude = altitude
carb.log_warn("New global coordinates set to: " + str(self._latitude) + ", " + str(self._longitude) + ", " + str(self._altitude))
    def initialize_world(self):
        """Method that initializes the world object
        """
        # Creates a fresh World from the stored settings; any previously created world
        # reference held by this singleton is overwritten.
        self._world = World(**self._world_settings)
        #asyncio.ensure_future(self._world.initialize_simulation_context_async())
def get_vehicle(self, stage_prefix: str):
"""Method that returns the vehicle object given its 'stage_prefix', i.e., the name the vehicle was spawned with in the simulator.
Args:
stage_prefix (str): The name the vehicle will present in the simulator when spawned.
Returns:
Vehicle: Returns a vehicle object that was spawned with the given 'stage_prefix'
"""
return self._vehicle_manager.vehicles[stage_prefix]
def get_all_vehicles(self):
"""
Method that returns a list of vehicles that are considered active in the simulator
Returns:
list: A list of all vehicles that are currently instantiated.
"""
return self._vehicle_manager.vehicles
    def get_default_environments(self):
        """
        Method that returns a dictionary containing all the default simulation environments and their path

        Returns:
            dict: The module-level SIMULATION_ENVIRONMENTS table mapping environment names to asset paths.
        """
        return SIMULATION_ENVIRONMENTS
    def generate_quadrotor_config_from_yaml(self, file: str):
        """Parse a YAML configuration file for a quadrotor and generate its configuration object.

        Args:
            file (str): Path to the YAML file describing the quadrotor configuration.

        Returns:
            The configuration produced by generate_quadrotor_config_from_dict
            (defined elsewhere in this class - not visible in this chunk).
        """
        # Load the quadrotor configuration data from the given yaml file
        with open(file) as f:
            data = yaml.safe_load(f)

        return self.generate_quadrotor_config_from_dict(data)
def clear_scene(self):
"""
Method that when invoked will clear all vehicles and the simulation environment, leaving only an empty world with a physics environment.
"""
# If the physics simulation was running, stop it first
if self.world is not None:
self.world.stop()
# Clear the world
if self.world is not None:
self.world.clear_all_callbacks()
self.world.clear()
# Clear the stage
clear_stage()
# Remove all the robots that were spawned
self._vehicle_manager.remove_all_vehicles()
# Call python's garbage collection
gc.collect()
# Re-initialize the physics context
asyncio.ensure_future(self._world.initialize_simulation_context_async())
carb.log_info("Current scene and its vehicles has been deleted")
async def load_environment_async(self, usd_path: str, force_clear: bool=False):
"""Method that loads a given world (specified in the usd_path) into the simulator asynchronously.
Args:
usd_path (str): The path where the USD file describing the world is located.
force_clear (bool): Whether to perform a clear before loading the asset. Defaults to False.
It should be set to True only if the method is invoked from an App (GUI mode).
"""
# Reset and pause the world simulation (only if force_clear is true)
# This is done to maximize the support between running in GUI as extension vs App
if force_clear == True:
# Create a new stage and initialize (or re-initialized) the world
await create_new_stage_async()
self._world = World(**self._world_settings)
await self._world.initialize_simulation_context_async()
self._world = World.instance()
await self.world.reset_async()
await self.world.stop_async()
# Load the USD asset that will be used for the environment
try:
self.load_asset(usd_path, "/World/layout")
except Exception as e:
carb.log_warn("Could not load the desired environment: " + str(e))
carb.log_info("A new environment has been loaded successfully")
def load_environment(self, usd_path: str, force_clear: bool=False):
"""Method that loads a given world (specified in the usd_path) into the simulator. If invoked from a python app,
this method should have force_clear=False, as the world reset and stop are performed asynchronously by this method,
and when we are operating in App mode, we want everything to run in sync.
Args:
usd_path (str): The path where the USD file describing the world is located.
force_clear (bool): Whether to perform a clear before loading the asset. Defaults to False.
"""
asyncio.ensure_future(self.load_environment_async(usd_path, force_clear))
def load_nvidia_environment(self, environment_asset: str = "Hospital/hospital.usd"):
"""
Method that is used to load NVidia internally provided USD stages into the simulaton World
Args:
environment_asset (str): The name of the nvidia asset inside the /Isaac/Environments folder. Default to Hospital/hospital.usd.
"""
# Get the nvidia assets root path
nvidia_assets_path = nucleus.get_assets_root_path()
# Define the environments path inside the NVidia assets
environments_path = "/Isaac/Environments"
# Get the complete usd path
usd_path = nvidia_assets_path + environments_path + "/" + environment_asset
# Try to load the asset into the world
self.load_asset(usd_path, "/World/layout")
def load_asset(self, usd_asset: str, stage_prefix: str):
"""
Method that will attempt to load an asset into the current simulation world, given the USD asset path.
Args:
usd_asset (str): The path where the USD file describing the world is located.
stage_prefix (str): The name the vehicle will present in the simulator when spawned.
"""
# Try to check if there is already a prim with the same stage prefix in the stage
if self._world.stage.GetPrimAtPath(stage_prefix):
raise Exception("A primitive already exists at the specified path")
# Create the stage primitive and load the usd into it
prim = self._world.stage.DefinePrim(stage_prefix)
success = prim.GetReferences().AddReference(usd_asset)
if not success:
raise Exception("The usd asset" + usd_asset + "is not load at stage path " + stage_prefix)
    def set_viewport_camera(self, camera_position, camera_target):
        """Sets the viewport camera to given position and makes it point to another target position.

        Args:
            camera_position (list): A list with [X, Y, Z] coordinates of the camera in ENU inertial frame.
            camera_target (list): A list with [X, Y, Z] coordinates of the target that the camera should point to in the ENU inertial frame.
        """
        # Set the camera view to a fixed value (delegates to Isaac Sim's viewport helper)
        set_camera_view(eye=camera_position, target=camera_target)
def set_world_settings(self, physics_dt=None, stage_units_in_meters=None, rendering_dt=None):
"""
Set the current world settings to the pre-defined settings. TODO - finish the implementation of this method.
For now these new setting will never override the default ones.
"""
# Set the physics engine update rate
if physics_dt is not None:
self._world_settings["physics_dt"] = physics_dt
# Set the units of the simulator to meters
if stage_units_in_meters is not None:
self._world_settings["stage_units_in_meters"] = stage_units_in_meters
# Set the render engine update rate (might not be the same as the physics engine)
if rendering_dt is not None:
self._world_settings["rendering_dt"] = rendering_dt
def _get_px4_path_from_config(self):
"""
Method that reads the configured PX4 installation directory from the extension configuration file
Returns:
str: A string with the path to the px4 configuration directory or empty string ''
"""
px4_dir = ""
# Open the configuration file. If it fails, just return the empty path
try:
with open(CONFIG_FILE, 'r') as f:
data = yaml.safe_load(f)
px4_dir = os.path.expanduser(data.get("px4_dir", None))
except:
carb.log_warn("Could not retrieve px4_dir from: " + str(CONFIG_FILE))
return px4_dir
def _get_px4_default_airframe_from_config(self):
"""
Method that reads the configured PX4 default airframe from the extension configuration file
Returns:
str: A string with the path to the PX4 default airframe or empty string ''
"""
px4_default_airframe = ""
# Open the configuration file. If it fails, just return the empty path
try:
with open(CONFIG_FILE, 'r') as f:
data = yaml.safe_load(f)
px4_default_airframe = os.path.expanduser(data.get("px4_default_airframe", None))
except:
carb.log_warn("Could not retrieve px4_default_airframe from: " + str(CONFIG_FILE))
return px4_default_airframe
def _get_global_coordinates_from_config(self):
"""Method that reads the default latitude, longitude and altitude from the extension configuration file
Returns:
(float, float, float): A tuple of 3 floats with the latitude, longitude and altitude to use as the origin of the world
"""
latitude = 0.0
longitude = 0.0
altitude = 0.0
# Open the configuration file. If it fails, just return the empty path
try:
with open(CONFIG_FILE, 'r') as f:
data = yaml.safe_load(f)
# Try to read the coordinates from the configuration file
global_coordinates = data.get("global_coordinates", {})
latitude = global_coordinates.get("latitude", 0.0)
longitude = global_coordinates.get("longitude", 0.0)
altitude = global_coordinates.get("altitude", 0.0)
except:
carb.log_warn("Could not retrieve the global coordinates from: " + str(CONFIG_FILE))
return (latitude, longitude, altitude)
def set_px4_path(self, path: str):
"""Method that allows a user to save a new px4 directory in the configuration files of the extension.
Args:
absolute_path (str): The new path of the px4-autopilot installation directory
"""
# Save the new path for current use during this simulation
self._px4_path = os.path.expanduser(path)
# Save the new path in the configurations file for the next simulations
try:
# Open the configuration file and the all the configurations that it contains
with open(CONFIG_FILE, 'r') as f:
data = yaml.safe_load(f)
# Open the configuration file. If it fails, just warn in the console
with open(CONFIG_FILE, 'w') as f:
data["px4_dir"] = path
yaml.dump(data, f)
except:
carb.log_warn("Could not save px4_dir to: " + str(CONFIG_FILE))
carb.log_warn("New px4_dir set to: " + str(self._px4_path))
def set_px4_default_airframe(self, airframe: str):
"""Method that allows a user to save a new px4 default airframe for the extension.
Args:
absolute_path (str): The new px4 default airframe
"""
# Save the new path for current use during this simulation
self._px4_default_airframe = airframe
# Save the new path in the configurations file for the next simulations
try:
# Open the configuration file and the all the configurations that it contains
with open(CONFIG_FILE, 'r') as f:
data = yaml.safe_load(f)
# Open the configuration file. If it fails, just warn in the console
with open(CONFIG_FILE, 'w') as f:
data["px4_default_airframe"] = airframe
yaml.dump(data, f)
except:
carb.log_warn("Could not save px4_default_airframe to: " + str(CONFIG_FILE))
carb.log_warn("New px4_default_airframe set to: " + str(self._px4_default_airframe))
    def set_default_global_coordinates(self):
        """
        Method that sets the latitude, longitude and altitude from the pegasus interface to the
        default global coordinates specified in the extension configuration file
        """
        # Falls back to (0.0, 0.0, 0.0) if the configuration file cannot be read
        self._latitude, self._longitude, self._altitude = self._get_global_coordinates_from_config()
def set_new_default_global_coordinates(self, latitude: float=None, longitude: float=None, altitude: float=None):
# Set the current global coordinates to the new default global coordinates
self.set_global_coordinates(latitude, longitude, altitude)
# Update the default global coordinates in the configuration file
try:
# Open the configuration file and the all the configurations that it contains
with open(CONFIG_FILE, 'r') as f:
data = yaml.safe_load(f)
# Open the configuration file. If it fails, just warn in the console
with open(CONFIG_FILE, 'w') as f:
if latitude is not None:
data["global_coordinates"]["latitude"] = latitude
if longitude is not None:
data["global_coordinates"]["longitude"] = longitude
if altitude is not None:
data["global_coordinates"]["altitude"] = altitude
# Save the updated configurations
yaml.dump(data, f)
except:
carb.log_warn("Could not save the new global coordinates to: " + str(CONFIG_FILE))
carb.log_warn("New global coordinates set to: latitude=" + str(latitude) + ", longitude=" + str(longitude) + ", altitude=" + str(altitude))
    def __new__(cls):
        """Allocates the memory and creates the actual PegasusInterface object if no instance exists yet.
        Otherwise, returns the existing instance of the PegasusInterface class.

        Returns:
            PegasusInterface: the single instance of the PegasusInterface class
        """
        # Use a lock in here to make sure we do not have a race condition
        # when using multi-threading and creating the first instance of the Pegasus extension manager
        with cls._lock:
            if cls._instance is None:
                cls._instance = object.__new__(cls)

        return PegasusInterface._instance
def __del__(self):
    """Destructor for the object. Destroys the only existing instance of this class."""
    # Clear the singleton bookkeeping so a fresh instance can be created afterwards.
    # NOTE(review): this runs whenever ANY instance is garbage-collected; it relies on the
    # singleton pattern guaranteeing at most one live instance — confirm.
    PegasusInterface._instance = None
    PegasusInterface._is_initialized = False
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/backends/backend.py | """
| File: backend.py
| Author: Marcelo Jacinto ([email protected])
| Description:
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
class Backend:
    """
    Template for a communication/control backend. Every vehicle can own at least one backend at
    the same time. On every timestep the simulation pushes fresh data into the backend through
    'update_state' and 'update_sensor', i.e. for every time step the backend receives the current
    state of the vehicle and of its sensors. In the other direction, the backend must implement
    'input_reference', which the vehicle simulation queries to obtain the desired angular
    velocities to apply to the rotors of the vehicle. The 'update' method is called on every
    physics step and is the place to implement custom logic or to exchange data with another
    interface (such as PX4 through mavlink or ROS2). Finally, 'start', 'stop' and 'reset' are
    callbacks fired when the simulation is started, stopped and reset, as their names imply.
    """

    def __init__(self):
        """Initialize the Backend class
        """
        # The vehicle reference is attached later, via 'initialize'
        self._vehicle = None

    # ------------
    # Properties
    # ------------

    @property
    def vehicle(self):
        """A reference to the vehicle associated with this backend.

        Returns:
            Vehicle: A reference to the vehicle associated with this backend.
        """
        return self._vehicle

    def initialize(self, vehicle):
        """Invoked when the simulation is starting to give the control backend access to the
        entire vehicle object. Even though 'update_sensor' and 'update_state' already deliver
        the latest vehicle state and sensor data on every physics step, having direct access to
        the full vehicle object may prove useful under some circumstances, e.g. to override
        default vehicle behaviour from within this control backend structure.

        Args:
            vehicle (Vehicle): A reference to the vehicle that this sensor is associated with
        """
        self._vehicle = vehicle

    def update_sensor(self, sensor_type: str, data):
        """Handle the arrival of new sensor data. The default implementation does nothing.

        Args:
            sensor_type (str): A name that describes the type of sensor
            data (dict): A dictionary that contains the data produced by the sensor
        """
        pass

    def update_state(self, state):
        """Handle the arrival of the current vehicle state. The default implementation does nothing.

        Args:
            state (State): The current state of the vehicle.
        """
        pass

    def input_reference(self):
        """Return the list of desired angular velocities to apply to the vehicle rotors.

        Returns:
            list: Empty by default; subclasses should return one reference per rotor.
        """
        return []

    def update(self, dt: float):
        """Called by the simulation on every physics step; subclasses may use it to update the
        backend state and the information being sent/received from the communication interface.

        Args:
            dt (float): The time elapsed between the previous and current function calls (s).
        """
        pass

    def start(self):
        """Callback fired when the simulation of the vehicle begins."""
        pass

    def stop(self):
        """Callback fired when the simulation of the vehicle stops."""
        pass

    def reset(self):
        """Callback fired when the vehicle simulation is reset to its original state."""
        pass
| 3,830 | Python | 40.193548 | 143 | 0.674935 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/backends/__init__.py | """
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
from .backend import Backend
from .mavlink_backend import MavlinkBackend, MavlinkBackendConfig
from .ros2_backend import ROS2Backend
| 288 | Python | 31.111108 | 82 | 0.784722 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/backends/ros2_backend.py | """
| File: ros2_backend.py
| Author: Marcelo Jacinto ([email protected])
| Description: File that implements the ROS2 Backend for communication/control with/of the vehicle simulation through ROS2 topics
| License: BSD-3-Clause. Copyright (c) 2024, Marcelo Jacinto. All rights reserved.
"""
import carb
from omni.isaac.core.utils.extensions import disable_extension, enable_extension
# Perform some checks, because Isaac Sim some times does not play nice when using ROS/ROS2
disable_extension("omni.isaac.ros_bridge")
enable_extension("omni.isaac.ros2_bridge")
# ROS2 imports
import rclpy
from std_msgs.msg import Float64
from geometry_msgs.msg import TransformStamped
from sensor_msgs.msg import Imu, MagneticField, NavSatFix, NavSatStatus
from geometry_msgs.msg import PoseStamped, TwistStamped, AccelStamped
# TF imports
from tf2_ros.static_transform_broadcaster import StaticTransformBroadcaster
from tf2_ros.transform_broadcaster import TransformBroadcaster
from pegasus.simulator.logic.backends.backend import Backend
class ROS2Backend(Backend):
    """Backend that bridges the vehicle simulation and ROS2: it publishes the vehicle state
    (pose, twists and acceleration in ENU), sensor data (IMU, magnetometer, GPS) and the TF
    tree, and subscribes to one angular-velocity reference topic per rotor to control the
    vehicle.
    """

    def __init__(self, vehicle_id: int, num_rotors=4, config: dict = {}):
        """Initialize the ROS2Backend

        Args:
            vehicle_id (int): The id of the vehicle (used for the node name, topic namespace and TF frame names)
            num_rotors (int): Number of rotors of the vehicle; one control subscription is created per rotor. Defaults to 4.
            config (dict): A Dictionary that contains all the parameters for configuring the ROS2Backend - it can be empty or only have some of the parameters used by the ROS2Backend.

        Examples:
            The dictionary default parameters are

            >>> {"namespace": "drone"                             # Namespace to append to the topics
            >>>  "pub_pose": True,                                # Publish the pose of the vehicle
            >>>  "pub_twist": True,                               # Publish the twist of the vehicle
            >>>  "pub_twist_inertial": True,                      # Publish the twist of the vehicle in the inertial frame
            >>>  "pub_accel": True,                               # Publish the acceleration of the vehicle
            >>>  "pub_imu": True,                                 # Publish the IMU data
            >>>  "pub_mag": True,                                 # Publish the magnetometer data
            >>>  "pub_gps": True,                                 # Publish the GPS data
            >>>  "pub_gps_vel": True,                             # Publish the GPS velocity data
            >>>  "pose_topic": "state/pose",                      # Position and attitude of the vehicle in ENU
            >>>  "twist_topic": "state/twist",                    # Linear and angular velocities in the body frame of the vehicle
            >>>  "twist_inertial_topic": "state/twist_inertial",  # Linear velocity of the vehicle in the inertial frame
            >>>  "accel_topic": "state/accel",                    # Linear acceleration of the vehicle in the inertial frame
            >>>  "imu_topic": "sensors/imu",                      # IMU data
            >>>  "mag_topic": "sensors/mag",                      # Magnetometer data
            >>>  "gps_topic": "sensors/gps",                      # GPS data
            >>>  "gps_vel_topic": "sensors/gps_twist"}            # GPS velocity data
        """
        # Initialize the Backend base class (sets the vehicle reference to None); the original
        # implementation skipped this, leaving 'self._vehicle' undefined until 'initialize'
        super().__init__()

        # Save the configurations for this backend
        self._id = vehicle_id
        self._num_rotors = num_rotors
        self._namespace = config.get("namespace", "drone" + str(vehicle_id))

        # Start the actual ROS2 setup here
        # NOTE(review): rclpy.init() initializes the global ROS2 context - presumably only one
        # ROS2Backend is created per process; confirm before using this backend for multiple vehicles
        rclpy.init()
        self.node = rclpy.create_node("vehicle_" + str(vehicle_id))

        # Create publishers for the state of the vehicle in ENU
        if config.get("pub_pose", True):
            self.pose_pub = self.node.create_publisher(PoseStamped, self._namespace + str(self._id) + "/" + config.get("pose_topic", "state/pose"), rclpy.qos.qos_profile_sensor_data)
        if config.get("pub_twist", True):
            self.twist_pub = self.node.create_publisher(TwistStamped, self._namespace + str(self._id) + "/" + config.get("twist_topic", "state/twist"), rclpy.qos.qos_profile_sensor_data)
        if config.get("pub_twist_inertial", True):
            self.twist_inertial_pub = self.node.create_publisher(TwistStamped, self._namespace + str(self._id) + "/" + config.get("twist_inertial_topic", "state/twist_inertial"), rclpy.qos.qos_profile_sensor_data)
        if config.get("pub_accel", True):
            self.accel_pub = self.node.create_publisher(AccelStamped, self._namespace + str(self._id) + "/" + config.get("accel_topic", "state/accel"), rclpy.qos.qos_profile_sensor_data)

        # Create publishers for some sensor data
        if config.get("pub_imu", True):
            self.imu_pub = self.node.create_publisher(Imu, self._namespace + str(self._id) + "/" + config.get("imu_topic", "sensors/imu"), rclpy.qos.qos_profile_sensor_data)
        if config.get("pub_mag", True):
            self.mag_pub = self.node.create_publisher(MagneticField, self._namespace + str(self._id) + "/" + config.get("mag_topic", "sensors/mag"), rclpy.qos.qos_profile_sensor_data)
        if config.get("pub_gps", True):
            self.gps_pub = self.node.create_publisher(NavSatFix, self._namespace + str(self._id) + "/" + config.get("gps_topic", "sensors/gps"), rclpy.qos.qos_profile_sensor_data)
        if config.get("pub_gps_vel", True):
            self.gps_vel_pub = self.node.create_publisher(TwistStamped, self._namespace + str(self._id) + "/" + config.get("gps_vel_topic", "sensors/gps_twist"), rclpy.qos.qos_profile_sensor_data)

        # Subscribe to one Float64 topic per rotor carrying the target angular velocity.
        # This is not ideal, but we need to reach out to NVIDIA so that they can improve the
        # ROS2 support with custom messages.
        # NOTE: the loop variable is bound as a lambda default argument - the previous
        # 'lambda x: self.rotor_callback(x, i)' late-bound 'i', so every subscription ended
        # up controlling the LAST rotor only
        self.rotor_subs = []
        for i in range(self._num_rotors):
            self.rotor_subs.append(
                self.node.create_subscription(
                    Float64,
                    self._namespace + str(self._id) + "/control/rotor" + str(i) + "/ref",
                    lambda msg, rotor_id=i: self.rotor_callback(msg, rotor_id),
                    10))

        # Setup zero input reference for the thrusters
        self.input_ref = [0.0 for i in range(self._num_rotors)]

        # Initiliaze the static tf broadcaster for the sensors
        self.tf_static_broadcaster = StaticTransformBroadcaster(self.node)

        # Initialize the static tf broadcaster for the base_link transformation
        self.send_static_transforms()

        # Initialize the dynamic tf broadcaster for the position of the body of the vehicle (base_link)
        # with respect to the inertial frame (map - ENU) expressed in the inertial frame (map - ENU)
        self.tf_broadcaster = TransformBroadcaster(self.node)

    def send_static_transforms(self):
        """Broadcast the two static transforms that never change during the simulation:
        base_link (FLU) -> base_link_frd and map (ENU) -> map_ned.
        """
        # Create the transformation from base_link FLU (ROS standard) to base_link FRD
        # (standard in airborne and marine vehicles)
        t = TransformStamped()
        t.header.stamp = self.node.get_clock().now().to_msg()
        t.header.frame_id = self._namespace + '_' + 'base_link'
        t.child_frame_id = self._namespace + '_' + 'base_link_frd'

        # Converts from FLU to FRD (180 deg rotation about the x axis)
        t.transform.translation.x = 0.0
        t.transform.translation.y = 0.0
        t.transform.translation.z = 0.0
        t.transform.rotation.x = 1.0
        t.transform.rotation.y = 0.0
        t.transform.rotation.z = 0.0
        t.transform.rotation.w = 0.0
        self.tf_static_broadcaster.sendTransform(t)

        # Create the transform from map, i.e inertial frame (ROS standard) to map_ned
        # (standard in airborne or marine vehicles)
        t = TransformStamped()
        t.header.stamp = self.node.get_clock().now().to_msg()
        t.header.frame_id = "map"
        t.child_frame_id = "map_ned"

        # Converts ENU to NED
        t.transform.translation.x = 0.0
        t.transform.translation.y = 0.0
        t.transform.translation.z = 0.0
        t.transform.rotation.x = -0.7071068
        t.transform.rotation.y = -0.7071068
        t.transform.rotation.z = 0.0
        t.transform.rotation.w = 0.0
        self.tf_static_broadcaster.sendTransform(t)

    def update_state(self, state):
        """Publishes the current state of the vehicle (pose, body twist, inertial twist and
        acceleration) and broadcasts the map -> base_link transform.

        Args:
            state (State): The current state of the vehicle.
        """
        pose = PoseStamped()
        twist = TwistStamped()
        twist_inertial = TwistStamped()
        accel = AccelStamped()

        # Update the headers (all messages share the same timestamp)
        pose.header.stamp = self.node.get_clock().now().to_msg()
        twist.header.stamp = pose.header.stamp
        twist_inertial.header.stamp = pose.header.stamp
        accel.header.stamp = pose.header.stamp
        pose.header.frame_id = "map"
        twist.header.frame_id = self._namespace + "_" + "base_link"
        twist_inertial.header.frame_id = "map"
        accel.header.frame_id = "map"

        # Fill the position and attitude of the vehicle in ENU
        pose.pose.position.x = state.position[0]
        pose.pose.position.y = state.position[1]
        pose.pose.position.z = state.position[2]
        pose.pose.orientation.x = state.attitude[0]
        pose.pose.orientation.y = state.attitude[1]
        pose.pose.orientation.z = state.attitude[2]
        pose.pose.orientation.w = state.attitude[3]

        # Fill the linear and angular velocities in the body frame of the vehicle
        twist.twist.linear.x = state.linear_body_velocity[0]
        twist.twist.linear.y = state.linear_body_velocity[1]
        twist.twist.linear.z = state.linear_body_velocity[2]
        twist.twist.angular.x = state.angular_velocity[0]
        twist.twist.angular.y = state.angular_velocity[1]
        twist.twist.angular.z = state.angular_velocity[2]

        # Fill the linear velocity of the vehicle in the inertial frame
        twist_inertial.twist.linear.x = state.linear_velocity[0]
        twist_inertial.twist.linear.y = state.linear_velocity[1]
        twist_inertial.twist.linear.z = state.linear_velocity[2]

        # Fill the linear acceleration in the inertial frame
        accel.accel.linear.x = state.linear_acceleration[0]
        accel.accel.linear.y = state.linear_acceleration[1]
        accel.accel.linear.z = state.linear_acceleration[2]

        # Publish the messages containing the state of the vehicle
        self.pose_pub.publish(pose)
        self.twist_pub.publish(twist)
        self.twist_inertial_pub.publish(twist_inertial)
        self.accel_pub.publish(accel)

        # Update the dynamic tf broadcaster with the current position of the vehicle in the inertial frame
        t = TransformStamped()
        t.header.stamp = pose.header.stamp
        t.header.frame_id = "map"
        t.child_frame_id = self._namespace + '_' + 'base_link'
        t.transform.translation.x = state.position[0]
        t.transform.translation.y = state.position[1]
        t.transform.translation.z = state.position[2]
        t.transform.rotation.x = state.attitude[0]
        t.transform.rotation.y = state.attitude[1]
        t.transform.rotation.z = state.attitude[2]
        t.transform.rotation.w = state.attitude[3]
        self.tf_broadcaster.sendTransform(t)

    def rotor_callback(self, ros_msg: Float64, rotor_id):
        """Stores the angular velocity reference received for one rotor.

        Args:
            ros_msg (Float64): The message carrying the angular velocity reference.
            rotor_id (int): Index of the rotor this reference applies to.
        """
        # Update the reference for the rotor of the vehicle
        self.input_ref[rotor_id] = float(ros_msg.data)

    def update_sensor(self, sensor_type: str, data):
        """Publishes the received sensor data to the corresponding ROS2 topic. Sensors other
        than IMU, GPS and Magnetometer are ignored.

        Args:
            sensor_type (str): A name that describes the type of sensor
            data (dict): A dictionary that contains the data produced by the sensor
        """
        if sensor_type == "IMU":
            self.update_imu_data(data)
        elif sensor_type == "GPS":
            self.update_gps_data(data)
        elif sensor_type == "Magnetometer":
            self.update_mag_data(data)
        else:
            pass

    def update_imu_data(self, data):
        """Publishes the latest IMU measurement (expressed in the body FRD frame).

        Args:
            data (dict): The data produced by an IMU sensor
        """
        msg = Imu()

        # Update the header
        msg.header.stamp = self.node.get_clock().now().to_msg()
        msg.header.frame_id = self._namespace + '_' + "base_link_frd"

        # Update the angular velocity (NED + FRD)
        msg.angular_velocity.x = data["angular_velocity"][0]
        msg.angular_velocity.y = data["angular_velocity"][1]
        msg.angular_velocity.z = data["angular_velocity"][2]

        # Update the linear acceleration (NED)
        msg.linear_acceleration.x = data["linear_acceleration"][0]
        msg.linear_acceleration.y = data["linear_acceleration"][1]
        msg.linear_acceleration.z = data["linear_acceleration"][2]

        # Publish the message with the current imu state
        self.imu_pub.publish(msg)

    def update_gps_data(self, data):
        """Publishes the latest GPS fix and GPS-measured velocity (NED).

        Args:
            data (dict): The data produced by a GPS sensor
        """
        msg = NavSatFix()
        msg_vel = TwistStamped()

        # Update the headers
        msg.header.stamp = self.node.get_clock().now().to_msg()
        msg.header.frame_id = "map_ned"
        msg_vel.header.stamp = msg.header.stamp
        msg_vel.header.frame_id = msg.header.frame_id

        # Update the status of the GPS
        status_msg = NavSatStatus()
        status_msg.status = 0  # unaugmented fix position
        status_msg.service = 1  # GPS service
        msg.status = status_msg

        # Update the latitude, longitude and altitude
        msg.latitude = data["latitude"]
        msg.longitude = data["longitude"]
        msg.altitude = data["altitude"]

        # Update the velocity of the vehicle measured by the GPS in the inertial frame (NED)
        msg_vel.twist.linear.x = data["velocity_north"]
        msg_vel.twist.linear.y = data["velocity_east"]
        msg_vel.twist.linear.z = data["velocity_down"]

        # Publish the message with the current GPS state
        self.gps_pub.publish(msg)
        self.gps_vel_pub.publish(msg_vel)

    def update_mag_data(self, data):
        """Publishes the latest magnetometer measurement (expressed in the body FRD frame).

        Args:
            data (dict): The data produced by a magnetometer sensor
        """
        msg = MagneticField()

        # Update the headers. Use the namespaced FRD frame so the message is consistent with
        # the IMU and with the TF tree (the previous unprefixed "base_link_frd" did not match
        # any broadcast frame)
        msg.header.stamp = self.node.get_clock().now().to_msg()
        msg.header.frame_id = self._namespace + '_' + "base_link_frd"

        msg.magnetic_field.x = data["magnetic_field"][0]
        msg.magnetic_field.y = data["magnetic_field"][1]
        msg.magnetic_field.z = data["magnetic_field"][2]

        # Publish the message with the current magnetic data
        self.mag_pub.publish(msg)

    def input_reference(self):
        """Method that is used to return the latest target angular velocities to be applied to the vehicle

        Returns:
            A list with the target angular velocities for each individual rotor of the vehicle
        """
        return self.input_ref

    def update(self, dt: float):
        """Called by the simulation on every physics step. Messages are published as soon as new
        data arrives and rotor references are updated by the subscriptions, so here we only poll
        for new ROS2 messages in a non-blocking way.

        Args:
            dt (float): The time elapsed between the previous and current function calls (s).
        """
        rclpy.spin_once(self.node, timeout_sec=0)

    def start(self):
        """Callback fired when the simulation of the vehicle begins."""
        # Reset the reference for the thrusters
        self.input_ref = [0.0 for i in range(self._num_rotors)]

    def stop(self):
        """Callback fired when the simulation of the vehicle stops."""
        # Reset the reference for the thrusters
        self.input_ref = [0.0 for i in range(self._num_rotors)]

    def reset(self):
        """Callback fired when the vehicle simulation is reset to its original state."""
        # Reset the reference for the thrusters
        self.input_ref = [0.0 for i in range(self._num_rotors)]
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/backends/mavlink_backend.py | """
| File: mavlink_backend.py
| Author: Marcelo Jacinto ([email protected])
| Description: File that implements the Mavlink Backend for communication/control with/of the vehicle simulation
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
__all__ = ["MavlinkBackend", "MavlinkBackendConfig"]
import carb
import time
import numpy as np
from pymavlink import mavutil
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.backends.backend import Backend
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
from pegasus.simulator.logic.backends.tools.px4_launch_tool import PX4LaunchTool
class SensorSource:
    """Binary codes that signal which simulated sensor fields are being sent through mavlink.

    Attributes:
        | ACCEL (int): mavlink binary code for the accelerometer (0b0000000000111 = 7)
        | GYRO (int): mavlink binary code for the gyroscope (0b0000000111000 = 56)
        | MAG (int): mavlink binary code for the magnetometer (0b0000111000000 = 448)
        | BARO (int): mavlink binary code for the barometer (0b1101000000000 = 6656)
        | DIFF_PRESS (int): mavlink binary code for the pressure sensor (0b0010000000000 = 1024)
    """

    ACCEL: int = 7
    GYRO: int = 56
    MAG: int = 448
    BARO: int = 6656
    DIFF_PRESS: int = 1024
class SensorMsg:
    """
    Auxiliary data class holding the most recent simulated sensor readings to be sent through
    mavlink, together with a 'new data' flag per sensor group.
    """

    def __init__(self):
        # IMU readings and freshness flags
        self.new_imu_data: bool = False
        self.received_first_imu: bool = False
        self.xacc = self.yacc = self.zacc = 0.0
        self.xgyro = self.ygyro = self.zgyro = 0.0

        # Barometer readings
        self.new_bar_data: bool = False
        self.abs_pressure: float = 0.0
        self.pressure_alt: float = 0.0
        self.temperature: float = 0.0

        # Magnetometer readings
        self.new_mag_data: bool = False
        self.xmag = self.ymag = self.zmag = 0.0

        # Airspeed (differential pressure) readings
        self.new_press_data: bool = False
        self.diff_pressure: float = 0.0

        # GPS readings (-999 marks a fix that was never set)
        self.new_gps_data: bool = False
        self.fix_type: int = 0
        self.latitude_deg: float = -999
        self.longitude_deg: float = -999
        self.altitude: float = -999
        self.eph: float = 1.0
        self.epv: float = 1.0
        self.velocity: float = 0.0
        self.velocity_north: float = 0.0
        self.velocity_east: float = 0.0
        self.velocity_down: float = 0.0
        self.cog: float = 0.0
        self.satellites_visible: int = 0

        # Vision / mocap pose estimate
        self.new_vision_data: bool = False
        self.vision_x = self.vision_y = self.vision_z = 0.0
        self.vision_roll = self.vision_pitch = self.vision_yaw = 0.0
        # 21-element covariance, all zeros
        self.vision_covariance = (0,) * 21

        # Groundtruth simulation state
        self.new_sim_state: bool = False
        self.sim_attitude = [1.0, 0.0, 0.0, 0.0]        # quaternion [w, x, y, z]
        self.sim_acceleration = [0.0, 0.0, 0.0]         # [x, y, z body acceleration]
        self.sim_angular_vel = [0.0, 0.0, 0.0]          # [roll-rate, pitch-rate, yaw-rate] rad/s
        self.sim_lat = 0.0                              # [deg]
        self.sim_lon = 0.0                              # [deg]
        self.sim_alt = 0.0                              # [m]
        self.sim_ind_airspeed = 0.0                     # Indicated air speed
        self.sim_true_airspeed = 0.0                    # True air speed
        self.sim_velocity_inertial = [0.0, 0.0, 0.0]    # North-east-down [m/s]
class ThrusterControl:
    """
    Auxiliary data class that stores the thruster commands received via mavlink and scales them
    into the individual rotor angular velocities (rad/s) to apply to the vehicle.
    """

    def __init__(
        self,
        num_rotors: int = 4,
        input_offset=[0, 0, 0, 0],
        input_scaling=[0, 0, 0, 0],
        zero_position_armed=[100, 100, 100, 100],
    ):
        """Initialize the ThrusterControl object

        Args:
            num_rotors (int): The number of rotors of the actual system. Defaults to 4.
            input_offset (list): Offsets applied to the rotor values received via mavlink. Defaults to [0, 0, 0, 0].
            input_scaling (list): Scaling applied to the rotor values received via mavlink. Defaults to [0, 0, 0, 0].
            zero_position_armed (list): Additional offsets applied after scaling. Defaults to [100, 100, 100, 100].
        """
        self.num_rotors: int = num_rotors

        # Every offset/scaling list must provide exactly one entry per rotor
        assert len(input_offset) == self.num_rotors
        self.input_offset = input_offset
        assert len(input_scaling) == self.num_rotors
        self.input_scaling = input_scaling
        assert len(zero_position_armed) == self.num_rotors
        self.zero_position_armed = zero_position_armed

        # The actual angular velocity references to apply to the vehicle rotor joints
        self._input_reference = [0.0] * self.num_rotors

    @property
    def input_reference(self):
        """list: The angular velocities (rad/s) to apply to each rotor."""
        return self._input_reference

    def update_input_reference(self, controls):
        """Scale the raw thrust controls received via mavlink into rotor angular velocities (rad/s).

        Args:
            controls (list): A list of ints with thrust controls received via mavlink
        """
        # Check if the number of controls received is correct
        if len(controls) < self.num_rotors:
            carb.log_warn("Did not receive enough inputs for all the rotors")
            return

        # Compute the velocity reference for every rotor; zip stops after num_rotors entries,
        # so any extra control channels in the mavlink message are ignored
        for i, (control, offset, scaling, zero_armed) in enumerate(
            zip(controls, self.input_offset, self.input_scaling, self.zero_position_armed)
        ):
            self._input_reference[i] = (control + offset) * scaling + zero_armed

    def zero_input_reference(self):
        """Reset the input reference so that every rotor is stopped."""
        self._input_reference = [0.0] * self.num_rotors
class MavlinkBackendConfig:
    """
    An auxiliary data class used to store all the configurations for the mavlink communications.
    """

    def __init__(self, config={}):
        """
        Initialize the MavlinkBackendConfig class

        Args:
            config (dict): A Dictionary that contains all the parameters for configuring the Mavlink interface - it can be empty or only have some of the parameters used by this backend.

        Examples:
            The dictionary default parameters are

            >>> {"vehicle_id": 0,
            >>>  "connection_type": "tcpin",
            >>>  "connection_ip": "localhost",
            >>>  "connection_baseport": 4560,
            >>>  "px4_autolaunch": True,
            >>>  "px4_dir": "PegasusInterface().px4_path",
            >>>  "px4_vehicle_model": "gazebo-classic_iris",
            >>>  "enable_lockstep": True,
            >>>  "num_rotors": 4,
            >>>  "input_offset": [0.0, 0.0, 0.0, 0.0],
            >>>  "input_scaling": [1000.0, 1000.0, 1000.0, 1000.0],
            >>>  "zero_position_armed": [100.0, 100.0, 100.0, 100.0],
            >>>  "update_rate": 250.0
            >>> }
        """
        # Configurations for the mavlink communication protocol (note: the vehicle id is summed to the connection_baseport)
        self.vehicle_id = config.get("vehicle_id", 0)
        self.connection_type = config.get("connection_type", "tcpin")
        self.connection_ip = config.get("connection_ip", "localhost")
        self.connection_baseport = config.get("connection_baseport", 4560)

        # Configure whether to launch px4 in the background automatically or not for every vehicle launched
        self.px4_autolaunch: bool = config.get("px4_autolaunch", True)
        # Only consult the PegasusInterface singleton when no px4_dir was provided: evaluating
        # PegasusInterface().px4_path eagerly as the .get() default (previous implementation)
        # instantiated the singleton even when the caller overrides the value
        self.px4_dir: str = config["px4_dir"] if "px4_dir" in config else PegasusInterface().px4_path
        self.px4_vehicle_model: str = config.get("px4_vehicle_model", "gazebo-classic_iris")

        # Configurations to interpret the rotors control messages coming from mavlink
        self.enable_lockstep: bool = config.get("enable_lockstep", True)
        self.num_rotors: int = config.get("num_rotors", 4)
        self.input_offset = config.get("input_offset", [0.0, 0.0, 0.0, 0.0])
        self.input_scaling = config.get("input_scaling", [1000.0, 1000.0, 1000.0, 1000.0])
        self.zero_position_armed = config.get("zero_position_armed", [100.0, 100.0, 100.0, 100.0])

        # The update rate at which we will be sending data to mavlink (TODO - remove this from here in the future
        # and infer directly from the function calls)
        self.update_rate: float = config.get("update_rate", 250.0)  # [Hz]
class MavlinkBackend(Backend):
""" The Mavlink Backend used to receive the vehicle's state and sensor data in order to send to PX4 through mavlink. It also
receives via mavlink the thruster commands to apply to each vehicle rotor.
"""
def __init__(self, config=MavlinkBackendConfig()):
    """Initialize the MavlinkBackend

    Args:
        config (MavlinkBackendConfig): The configuration class for the MavlinkBackend. Defaults to MavlinkBackendConfig().
    """
    # Initialize the Backend object
    super().__init__()

    # Mavlink connection settings; the actual connection is only opened once the simulation starts
    self._vehicle_id = config.vehicle_id
    self._connection = None
    self._connection_port = f"{config.connection_type}:{config.connection_ip}:{config.connection_baseport + config.vehicle_id}"

    # PX4 autolaunch settings (px4_vehicle_model is only needed when px4_autolaunch is True)
    self.px4_autolaunch: bool = config.px4_autolaunch
    self.px4_vehicle_model: str = config.px4_vehicle_model
    self.px4_tool: PX4LaunchTool = None
    self.px4_dir: str = config.px4_dir

    # Rate at which data is sent through mavlink (TODO - remove this hardcoded value from here)
    self._update_rate: float = config.update_rate
    self._time_step: float = 1.0 / self._update_rate  # s
    self._is_running: bool = False

    # Buffers for the sensor data to send and for the rotor commands received via mavlink
    self._sensor_data: SensorMsg = SensorMsg()
    self._rotor_data: ThrusterControl = ThrusterControl(
        config.num_rotors, config.input_offset, config.input_scaling, config.zero_position_armed
    )

    # Vehicle actuator control data
    self._num_inputs: int = config.num_rotors
    self._input_reference: np.ndarray = np.zeros((self._num_inputs,))
    self._armed: bool = False
    self._input_offset: np.ndarray = np.zeros((self._num_inputs,))
    self._input_scaling: np.ndarray = np.zeros((self._num_inputs,))

    # Whether lockstep between sensor data and actuator control is enabled, plus the
    # bookkeeping flags used to implement it
    self._enable_lockstep: bool = config.enable_lockstep
    self._received_first_actuator: bool = False
    self._received_actuator: bool = False

    # Heartbeat bookkeeping for the software-in-the-loop connection
    self._received_first_hearbeat: bool = False
    self._last_heartbeat_sent_time = 0

    # Monotonic usec timestamp attached to outgoing sensor messages
    self._current_utime: int = 0
def update_sensor(self, sensor_type: str, data):
    """Callback invoked whenever a sensor produces new data. Only IMU, GPS, Barometer and
    Magnetometer measurements are stored to be sent through mavlink; data from any other
    sensor type is silently discarded.

    Args:
        sensor_type (str): A name that describes the type of sensor
        data (dict): A dictionary that contains the data produced by the sensor
    """
    # Dispatch table instead of an if/elif chain; unsupported sensor types map to nothing
    handlers = {
        "IMU": self.update_imu_data,
        "GPS": self.update_gps_data,
        "Barometer": self.update_bar_data,
        "Magnetometer": self.update_mag_data,
    }
    handler = handlers.get(sensor_type)
    if handler is not None:
        handler(data)
def update_imu_data(self, data):
    """Store the latest IMU measurement so it can be forwarded through mavlink.

    Args:
        data (dict): The data produced by an IMU sensor
    """
    sensor = self._sensor_data
    accel = data["linear_acceleration"]
    gyro = data["angular_velocity"]

    # Accelerometer reading
    sensor.xacc, sensor.yacc, sensor.zacc = accel[0], accel[1], accel[2]

    # Gyroscope reading
    sensor.xgyro, sensor.ygyro, sensor.zgyro = gyro[0], gyro[1], gyro[2]

    # Flag that fresh IMU data is available
    sensor.new_imu_data = True
    sensor.received_first_imu = True
def update_gps_data(self, data):
    """Store the latest GPS measurement, converted to the integer units used on the mavlink
    side (degrees * 1e7, millimeters, cm/s, centidegrees).

    Args:
        data (dict): The data produced by a GPS sensor
    """
    sensor = self._sensor_data

    # GPS fix, scaled to mavlink integer units
    sensor.fix_type = int(data["fix_type"])
    sensor.latitude_deg = int(data["latitude"] * 10000000)
    sensor.longitude_deg = int(data["longitude"] * 10000000)
    sensor.altitude = int(data["altitude"] * 1000)
    sensor.eph = int(data["eph"])
    sensor.epv = int(data["epv"])
    sensor.velocity = int(data["speed"] * 100)
    sensor.velocity_north = int(data["velocity_north"] * 100)
    sensor.velocity_east = int(data["velocity_east"] * 100)
    sensor.velocity_down = int(data["velocity_down"] * 100)
    sensor.cog = int(data["cog"] * 100)
    # NOTE: "sattelites_visible" (sic) is the key spelling emitted by the GPS sensor producer
    sensor.satellites_visible = int(data["sattelites_visible"])

    # Flag that fresh GPS data is available
    sensor.new_gps_data = True

    # Also store the groundtruth latitude/longitude/altitude reported alongside the fix
    sensor.sim_lat = int(data["latitude_gt"] * 10000000)
    sensor.sim_lon = int(data["longitude_gt"] * 10000000)
    sensor.sim_alt = int(data["altitude_gt"] * 1000)
def update_bar_data(self, data):
    """Store the latest Barometer measurement so it can be forwarded through mavlink.

    Args:
        data (dict): The data produced by a Barometer sensor
    """
    sensor = self._sensor_data

    # Barometer reading
    sensor.temperature = data["temperature"]
    sensor.abs_pressure = data["absolute_pressure"]
    sensor.pressure_alt = data["pressure_altitude"]

    # Flag that fresh Barometer data is available
    sensor.new_bar_data = True
def update_mag_data(self, data):
    """Store the latest Magnetometer measurement so it can be forwarded through mavlink.

    Args:
        data (dict): The data produced by a Magnetometer sensor
    """
    sensor = self._sensor_data
    mag = data["magnetic_field"]

    # Magnetometer reading
    sensor.xmag, sensor.ymag, sensor.zmag = mag[0], mag[1], mag[2]

    # Flag that fresh Magnetometer data is available
    sensor.new_mag_data = True
def update_vision_data(self, data):
    """Store a vision/mocap pose estimate. This callback is currently never invoked by
    'update_sensor' (TODO in a future simulator version).

    Args:
        data (dict): The data produced by a Vision sensor
    """
    sensor = self._sensor_data

    # Vision or MOCAP pose estimate
    sensor.vision_x = data["x"]
    sensor.vision_y = data["y"]
    sensor.vision_z = data["z"]
    sensor.vision_roll = data["roll"]
    sensor.vision_pitch = data["pitch"]
    sensor.vision_yaw = data["yaw"]

    # Flag that fresh vision or mocap data is available
    sensor.new_vision_data = True
def update_state(self, state: State):
    """Method that is used as callback and gets called at every physics step with the current state of the vehicle.
    This state is then stored in order to be sent as groundtruth via mavlink

    Args:
        state (State): The current state of the vehicle.
    """
    # Get the quaternion in the convention [x, y, z, w]
    attitude = state.get_attitude_ned_frd()

    # Rotate the quaternion to the mavlink standard, i.e. reorder to [w, x, y, z]
    self._sensor_data.sim_attitude[0] = attitude[3]
    self._sensor_data.sim_attitude[1] = attitude[0]
    self._sensor_data.sim_attitude[2] = attitude[1]
    self._sensor_data.sim_attitude[3] = attitude[2]

    # Get the angular velocity (body FRD frame)
    ang_vel = state.get_angular_velocity_frd()
    self._sensor_data.sim_angular_vel[0] = ang_vel[0]
    self._sensor_data.sim_angular_vel[1] = ang_vel[1]
    self._sensor_data.sim_angular_vel[2] = ang_vel[2]

    # Get the acceleration, scaled by 1000 and truncated to int
    # (presumably m/s^2 -> mm/s^2 as mavlink expects integers - TODO confirm units)
    acc_vel = state.get_linear_acceleration_ned()
    self._sensor_data.sim_acceleration[0] = int(acc_vel[0] * 1000)
    self._sensor_data.sim_acceleration[1] = int(acc_vel[1] * 1000)
    self._sensor_data.sim_acceleration[2] = int(acc_vel[2] * 1000)

    # Get the latitude, longitude and altitude directly from the GPS

    # Get the linear velocity of the vehicle in the inertial frame, scaled by 100
    # (presumably m/s -> cm/s - TODO confirm units)
    lin_vel = state.get_linear_velocity_ned()
    self._sensor_data.sim_velocity_inertial[0] = int(lin_vel[0] * 100)
    self._sensor_data.sim_velocity_inertial[1] = int(lin_vel[1] * 100)
    self._sensor_data.sim_velocity_inertial[2] = int(lin_vel[2] * 100)

    # Compute the air_speed - assumed indicated airspeed due to flow aligned with pitot (body x)
    body_vel = state.get_linear_body_velocity_ned_frd()
    self._sensor_data.sim_ind_airspeed = int(body_vel[0] * 100)
    self._sensor_data.sim_true_airspeed = int(np.linalg.norm(lin_vel) * 100)  # TODO - add wind here

    # Mark the groundtruth as fresh so send_ground_truth will transmit it
    self._sensor_data.new_sim_state = True
def input_reference(self):
    """Return the desired angular velocities (one entry per rotor) to apply to the vehicle rotors."""
    return self._rotor_data.input_reference
def __del__(self):
    """Gets called when the MavlinkBackend object gets destroyed. When this happens, we make sure
    to close any mavlink connection open for this vehicle.
    """
    # When this object gets destroyed, close the mavlink connection to free the communication port.
    # Catch Exception rather than using a bare 'except:' so that interpreter-shutdown signals
    # (SystemExit, KeyboardInterrupt) are not silently swallowed inside the finalizer.
    try:
        self._connection.close()
        self._connection = None
    except Exception:
        carb.log_info("Mavlink connection was not closed, because it was never opened")
def start(self):
    """Method that handles the begining of the simulation of vehicle. It will try to open the mavlink connection
    interface and also attemp to launch px4 in a background process if that option as specified in the config class
    """
    # If we are already running the mavlink interface, then ignore the function call
    if self._is_running == True:
        return

    # If the connection no longer exists (we stoped and re-started the stream, then re_intialize the interface)
    if self._connection is None:
        self.re_initialize_interface()

    # Set the flag to signal that the mavlink transmission has started
    self._is_running = True

    # Launch the PX4 in the background if needed (only when autolaunch is enabled and no tool is already running)
    if self.px4_autolaunch and self.px4_tool is None:
        carb.log_info("Attempting to launch PX4 in background process")
        self.px4_tool = PX4LaunchTool(self.px4_dir, self._vehicle_id, self.px4_vehicle_model)
        self.px4_tool.launch_px4()
def stop(self):
    """Method that when called will handle the stopping of the simulation of vehicle. It will make sure that any open
    mavlink connection will be closed and also that the PX4 background process gets killed (if it was auto-initialized)
    """
    # If the simulation was already stoped, then ignore the function call
    if self._is_running == False:
        return

    # Set the flag so that we are no longer running the mavlink interface
    self._is_running = False

    # Close the mavlink connection
    self._connection.close()
    self._connection = None

    # Close the PX4 if it was running.
    # Bug fix: the condition must check the process handle (px4_tool), not px4_autolaunch twice -
    # otherwise 'self.px4_tool.kill_px4()' raises AttributeError when PX4 was never launched.
    if self.px4_autolaunch and self.px4_tool is not None:
        carb.log_info("Attempting to kill PX4 background process")
        self.px4_tool.kill_px4()
        self.px4_tool = None
def reset(self):
    """No-op. Present only so this backend matches the common backend interface."""
    pass
def re_initialize_interface(self):
    """Auxiliar method used to get the MavlinkInterface to reset the MavlinkInterface to its initial state
    """
    self._is_running = False

    # Restart the sensor data
    self._sensor_data = SensorMsg()

    # Restart the connection (re-open the mavlink port that was configured for this backend)
    self._connection = mavutil.mavlink_connection(self._connection_port)

    # Auxiliar variables to handle the lockstep between receiving sensor data and actuator control
    self._received_first_actuator: bool = False
    self._received_actuator: bool = False

    # Auxiliar variables to check if we have already received an hearbeat from the software in the loop simulation
    self._received_first_hearbeat: bool = False
    self._last_heartbeat_sent_time = 0
def wait_for_first_hearbeat(self):
    """
    Checks whether a first hearbeat has arrived via mavlink and records it.

    Note: despite the name, this performs a single NON-blocking poll (blocking=False) and
    returns immediately; it is expected to be called repeatedly (from 'update') until the
    first hearbeat is actually received.
    """
    carb.log_warn("Waiting for first hearbeat")
    result = self._connection.wait_heartbeat(blocking=False)

    if result is not None:
        self._received_first_hearbeat = True
        carb.log_warn("Received first hearbeat")
def update(self, dt):
    """
    Method that is called at every physics step to send data to px4 and receive the control inputs via mavlink

    Args:
        dt (float): The time elapsed between the previous and current function calls (s).
    """
    # Check for the first hearbeat on the first few iterations
    if not self._received_first_hearbeat:
        self.wait_for_first_hearbeat()
        return

    # Lockstep: once we have received IMU data at least once, do not poll mavlink for
    # thruster commands until a *new* simulated IMU sample is available.
    # (The original expressed this with a 'while' loop whose body was a bare 'return',
    # i.e. it executed at most once - an 'if' states the intent directly.)
    if self._sensor_data.received_first_imu and not self._sensor_data.new_imu_data and self._is_running:
        return

    # Check if we have received any mavlink messages
    self.poll_mavlink_messages()

    # Send hearbeats at 1Hz
    if (time.time() - self._last_heartbeat_sent_time) > 1.0 or self._received_first_hearbeat == False:
        self.send_heartbeat()
        self._last_heartbeat_sent_time = time.time()

    # Update the current u_time for px4 (dt seconds -> microseconds)
    self._current_utime += int(dt * 1000000)

    # Send sensor messages
    self.send_sensor_msgs(self._current_utime)

    # Send the GPS messages
    self.send_gps_msgs(self._current_utime)
def poll_mavlink_messages(self):
    """
    Method that is used to check if new mavlink messages were received
    """
    # If we have not received the first hearbeat yet, do not poll for mavlink messages
    if self._received_first_hearbeat == False:
        return

    # Check if we need to lock and wait for actuator control data. This only blocks after
    # the first actuator message has ever arrived and when lockstep is enabled.
    needs_to_wait_for_actuator: bool = self._received_first_actuator and self._enable_lockstep

    # Start by assuming that we have not received data for the actuators for the current step
    self._received_actuator = False

    # Use this loop to emulate a do-while loop (make sure this runs at least once)
    while True:
        # Try to get a message (blocking only when lockstep requires waiting for actuator data)
        msg = self._connection.recv_match(blocking=needs_to_wait_for_actuator)

        # If a message was received
        if msg is not None:
            # Check if it is of the type that contains actuator controls
            if msg.id == mavutil.mavlink.MAVLINK_MSG_ID_HIL_ACTUATOR_CONTROLS:
                self._received_first_actuator = True
                self._received_actuator = True

                # Handle the control of the actuation commands received by PX4
                self.handle_control(msg.time_usec, msg.controls, msg.mode, msg.flags)

        # Check if we do not need to wait for an actuator message or we just received actuator input
        # If so, break out of the infinite loop
        if not needs_to_wait_for_actuator or self._received_actuator:
            break
def send_heartbeat(self, mav_type=mavutil.mavlink.MAV_TYPE_GENERIC):
    """
    Method that is used to publish an heartbear through mavlink protocol

    Args:
        mav_type (int): The ID that indicates the type of vehicle. Defaults to MAV_TYPE_GENERIC=0
    """
    carb.log_info("Sending heartbeat")

    # Note: to know more about these functions, go to pymavlink->dialects->v20->standard.py
    # This contains the definitions for sending the hearbeat and simulated sensor messages
    self._connection.mav.heartbeat_send(mav_type, mavutil.mavlink.MAV_AUTOPILOT_INVALID, 0, 0, 0)
def send_sensor_msgs(self, time_usec: int):
    """
    Method that when invoked, will send the simulated sensor data through mavlink

    Args:
        time_usec (int): The total time elapsed since the simulation started
    """
    carb.log_info("Sending sensor msgs")

    # Check which sensors have new data to send, building the HIL_SENSOR 'fields_updated' bitmask
    fields_updated: int = 0

    if self._sensor_data.new_imu_data:
        # Set the bit field to signal that we are sending updated accelerometer and gyro data
        fields_updated = fields_updated | SensorSource.ACCEL | SensorSource.GYRO
        self._sensor_data.new_imu_data = False

    if self._sensor_data.new_mag_data:
        # Set the bit field to signal that we are sending updated magnetometer data
        fields_updated = fields_updated | SensorSource.MAG
        self._sensor_data.new_mag_data = False

    if self._sensor_data.new_bar_data:
        # Set the bit field to signal that we are sending updated barometer data
        fields_updated = fields_updated | SensorSource.BARO
        self._sensor_data.new_bar_data = False

    if self._sensor_data.new_press_data:
        # Set the bit field to signal that we are sending updated diff pressure data
        fields_updated = fields_updated | SensorSource.DIFF_PRESS
        self._sensor_data.new_press_data = False

    # NOTE(review): the broad 'except' below hides *any* send failure behind a single warning;
    # consider narrowing it to the actual connection error types.
    try:
        self._connection.mav.hil_sensor_send(
            time_usec,
            self._sensor_data.xacc,
            self._sensor_data.yacc,
            self._sensor_data.zacc,
            self._sensor_data.xgyro,
            self._sensor_data.ygyro,
            self._sensor_data.zgyro,
            self._sensor_data.xmag,
            self._sensor_data.ymag,
            self._sensor_data.zmag,
            self._sensor_data.abs_pressure,
            self._sensor_data.diff_pressure,
            self._sensor_data.pressure_alt,
            self._sensor_data.altitude,
            fields_updated,
        )
    except:
        carb.log_warn("Could not send sensor data through mavlink")
def send_gps_msgs(self, time_usec: int):
    """
    Method that is used to send simulated GPS data through the mavlink protocol.

    Args:
        time_usec (int): The total time elapsed since the simulation started
    """
    carb.log_info("Sending GPS msgs")

    # Do not send GPS data, if no new data was received
    if not self._sensor_data.new_gps_data:
        return

    # Consume the "fresh data" flag so each GPS sample is sent at most once
    self._sensor_data.new_gps_data = False

    # Latitude, longitude and altitude (all in integers)
    try:
        self._connection.mav.hil_gps_send(
            time_usec,
            self._sensor_data.fix_type,
            self._sensor_data.latitude_deg,
            self._sensor_data.longitude_deg,
            self._sensor_data.altitude,
            self._sensor_data.eph,
            self._sensor_data.epv,
            self._sensor_data.velocity,
            self._sensor_data.velocity_north,
            self._sensor_data.velocity_east,
            self._sensor_data.velocity_down,
            self._sensor_data.cog,
            self._sensor_data.satellites_visible,
        )
    except:
        carb.log_warn("Could not send gps data through mavlink")
def send_vision_msgs(self, time_usec: int):
    """
    Method that is used to send simulated vision/mocap data through the mavlink protocol.

    Args:
        time_usec (int): The total time elapsed since the simulation started
    """
    carb.log_info("Sending vision/mocap msgs")

    # Do not send vision/mocap data, if not new data was received
    if not self._sensor_data.new_vision_data:
        return

    # Consume the "fresh data" flag so each vision sample is sent at most once
    self._sensor_data.new_vision_data = False

    try:
        self._connection.mav.global_vision_position_estimate_send(
            time_usec,
            self._sensor_data.vision_x,
            self._sensor_data.vision_y,
            self._sensor_data.vision_z,
            self._sensor_data.vision_roll,
            self._sensor_data.vision_pitch,
            self._sensor_data.vision_yaw,
            self._sensor_data.vision_covariance,
        )
    except:
        carb.log_warn("Could not send vision/mocap data through mavlink")
def send_ground_truth(self, time_usec: int):
    """
    Method that is used to send the groundtruth data of the vehicle through mavlink

    Args:
        time_usec (int): The total time elapsed since the simulation started
    """
    carb.log_info("Sending groundtruth msgs")

    # Do not send groundtruth if there is no new state. The sim_alt == 0 guard additionally
    # skips sending until a first GPS groundtruth sample has populated the altitude
    # (sim_alt is filled by the GPS update callback) - TODO confirm this is the intent.
    if not self._sensor_data.new_sim_state or self._sensor_data.sim_alt == 0:
        return

    # Consume the "fresh data" flag so each state sample is sent at most once
    self._sensor_data.new_sim_state = False

    try:
        self._connection.mav.hil_state_quaternion_send(
            time_usec,
            self._sensor_data.sim_attitude,
            self._sensor_data.sim_angular_vel[0],
            self._sensor_data.sim_angular_vel[1],
            self._sensor_data.sim_angular_vel[2],
            self._sensor_data.sim_lat,
            self._sensor_data.sim_lon,
            self._sensor_data.sim_alt,
            self._sensor_data.sim_velocity_inertial[0],
            self._sensor_data.sim_velocity_inertial[1],
            self._sensor_data.sim_velocity_inertial[2],
            self._sensor_data.sim_ind_airspeed,
            self._sensor_data.sim_true_airspeed,
            self._sensor_data.sim_acceleration[0],
            self._sensor_data.sim_acceleration[1],
            self._sensor_data.sim_acceleration[2],
        )
    except:
        carb.log_warn("Could not send groundtruth through mavlink")
def handle_control(self, time_usec, controls, mode, flags):
    """
    Method that when received a control message, compute the forces simulated force that should be applied
    on each rotor of the vehicle

    Args:
        time_usec (int): The total time elapsed since the simulation started - Ignored argument
        controls (list): The per-actuator thrust_control commands received via mavlink
        mode (int): The mode bitfield reported by the autopilot; used here only to check the armed flag
        flags: Ignored argument
    """
    # Check if the vehicle is armed - Note: here we have to add a +1 since the code for armed is 128, but
    # pymavlink is return 129 (the end of the buffer)
    if mode == mavutil.mavlink.MAV_MODE_FLAG_SAFETY_ARMED + 1:
        carb.log_info("Parsing control input")

        # Set the rotor target speeds
        self._rotor_data.update_input_reference(controls)

    # If the vehicle is not armed, do not rotate the propellers
    else:
        self._rotor_data.zero_input_reference()
| 34,342 | Python | 39.884524 | 186 | 0.61697 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/backends/tools/px4_launch_tool.py | """
| File: px4_launch_tool.py
| Author: Marcelo Jacinto ([email protected])
| Description: Defines an auxiliary tool to launch the PX4 process in the background
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
# System tools used to launch the px4 process in the brackground
import os
import tempfile
import subprocess
class PX4LaunchTool:
    """
    A class that manages the start/stop of a px4 process. It requires only the path to the PX4 installation (assuming that
    PX4 was already built with 'make px4_sitl_default none'), the vehicle id and the vehicle model.
    """

    def __init__(self, px4_dir, vehicle_id: int = 0, px4_model: str = "gazebo-classic_iris"):
        """Construct the PX4LaunchTool object

        Args:
            px4_dir (str): A string with the path to the PX4-Autopilot directory
            vehicle_id (int): The ID of the vehicle. Defaults to 0.
            px4_model (str): The vehicle model. Defaults to "gazebo-classic_iris".
        """
        # Attribute that will hold the px4 process once it is running
        self.px4_process = None

        # The vehicle id (used for the mavlink port open in the system)
        self.vehicle_id = vehicle_id

        # Configurations to whether autostart px4 (SITL) automatically or have the user launch it manually on another
        # terminal
        self.px4_dir = px4_dir
        self.rc_script = self.px4_dir + "/ROMFS/px4fmu_common/init.d-posix/rcS"

        # Create a temporary filesystem for px4 to write data to/from (and modify the origin rcS files)
        self.root_fs = tempfile.TemporaryDirectory()

        # Set the environment variables that let PX4 know which vehicle model to use internally.
        # Bug fix: use a *copy* of os.environ - assigning os.environ directly aliases the parent
        # process environment, leaking PX4_SIM_MODEL into this process and any sibling launches.
        self.environment = os.environ.copy()
        self.environment["PX4_SIM_MODEL"] = px4_model

    def launch_px4(self):
        """
        Method that will launch a px4 instance with the specified configuration
        """
        self.px4_process = subprocess.Popen(
            [
                self.px4_dir + "/build/px4_sitl_default/bin/px4",
                self.px4_dir + "/ROMFS/px4fmu_common/",
                "-s",
                self.rc_script,
                "-i",
                str(self.vehicle_id),
                "-d",
            ],
            cwd=self.root_fs.name,
            shell=False,
            env=self.environment,
        )

    def kill_px4(self):
        """
        Method that will kill a px4 instance with the specified configuration. No-op if PX4 is not running.
        """
        if self.px4_process is not None:
            self.px4_process.kill()
            self.px4_process = None

    def __del__(self):
        """
        If the px4 process is still running when the PX4 launch tool object is whiped from memory, then make sure
        we kill the px4 instance so we don't end up with hanged px4 instances
        """
        # Make sure the PX4 process gets killed
        if self.px4_process:
            self.kill_px4()

        # Make sure we clean the temporary filesystem used for the simulation
        self.root_fs.cleanup()
# ---- Code used for debugging the px4 tool ----
def main():
    """Manual smoke test: launch a PX4 SITL instance from $HOME/PX4-Autopilot and keep it alive for one minute."""
    launcher = PX4LaunchTool(os.environ["HOME"] + "/PX4-Autopilot")
    launcher.launch_px4()

    import time

    time.sleep(60)


if __name__ == "__main__":
    main()
| 3,332 | Python | 32 | 122 | 0.612545 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/vehicles/vehicle.py | """
| File: vehicle.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Definition of the Vehicle class which is used as the base for all the vehicles.
"""
# Numerical computations
import numpy as np
from scipy.spatial.transform import Rotation
# Low level APIs
import carb
from pxr import Usd, Gf
# High level Isaac sim APIs
import omni.usd
from omni.isaac.core.world import World
from omni.isaac.core.utils.prims import define_prim, get_prim_at_path
from omni.usd import get_stage_next_free_path
from omni.isaac.core.robots.robot import Robot
# Extension APIs
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
from pegasus.simulator.logic.vehicle_manager import VehicleManager
def get_world_transform_xform(prim: Usd.Prim):
    """
    Get the world rotation of a prim using omni.usd.get_world_transform_matrix().
    See https://docs.omniverse.nvidia.com/kit/docs/omni.usd/latest/omni.usd/omni.usd.get_world_transform_matrix.html

    Note: although the full world transform is computed, only its rotation component is
    extracted and returned (no translation or scale).

    Args:
        prim (Usd.Prim): The prim to calculate the world transformation.

    Returns:
        Gf.Rotation: The rotation component of the prim's world transform.
    """
    world_transform: Gf.Matrix4d = omni.usd.get_world_transform_matrix(prim)
    rotation: Gf.Rotation = world_transform.ExtractRotation()
    return rotation
class Vehicle(Robot):
    def __init__(
        self,
        stage_prefix: str,
        usd_path: str = None,
        # NOTE(review): mutable default arguments are shared across calls - safe only while never mutated
        init_pos=[0.0, 0.0, 0.0],
        init_orientation=[0.0, 0.0, 0.0, 1.0],
    ):
        """
        Class that initializes a vehicle in the isaac sim's curent stage

        Args:
            stage_prefix (str): The name the vehicle will present in the simulator when spawned. Defaults to "quadrotor".
            usd_path (str): The USD file that describes the looks and shape of the vehicle. Defaults to "".
            init_pos (list): The initial position of the vehicle in the inertial frame (in ENU convention). Defaults to [0.0, 0.0, 0.0].
            init_orientation (list): The initial orientation of the vehicle in quaternion [qx, qy, qz, qw]. Defaults to [0.0, 0.0, 0.0, 1.0].
        """
        # Get the current world at which we want to spawn the vehicle
        self._world = PegasusInterface().world
        self._current_stage = self._world.stage

        # Save the name with which the vehicle will appear in the stage
        # and the name of the .usd file that contains its description
        self._stage_prefix = get_stage_next_free_path(self._current_stage, stage_prefix, False)
        self._usd_file = usd_path

        # Get the vehicle name by taking the last part of vehicle stage prefix
        self._vehicle_name = self._stage_prefix.rpartition("/")[-1]

        # Spawn the vehicle primitive in the world's stage
        self._prim = define_prim(self._stage_prefix, "Xform")
        self._prim = get_prim_at_path(self._stage_prefix)
        self._prim.GetReferences().AddReference(self._usd_file)

        # Initialize the "Robot" class
        # Note: we need to change the rotation to have qw first, because NVidia
        # does not keep a standard of quaternions inside its own libraries (not good, but okay)
        super().__init__(
            prim_path=self._stage_prefix,
            name=self._stage_prefix,
            position=init_pos,
            orientation=[init_orientation[3], init_orientation[0], init_orientation[1], init_orientation[2]],
            articulation_controller=None,
        )

        # Add this object for the world to track, so that if we clear the world, this object is deleted from memory and
        # as a consequence, from the VehicleManager as well
        self._world.scene.add(self)

        # Add the current vehicle to the vehicle manager, so that it knows
        # that a vehicle was instantiated
        VehicleManager.get_vehicle_manager().add_vehicle(self._stage_prefix, self)

        # Variable that will hold the current state of the vehicle
        self._state = State()

        # Motor speeds that are given as reference (filled by subclasses)
        self._motor_speed = []

        # Add a callback to the physics engine to update the current state of the system
        self._world.add_physics_callback(self._stage_prefix + "/state", self.update_state)

        # Add the update method to the physics callback if the world was received
        # so that we can apply forces and torques to the vehicle. Note, this method should
        # be implemented in classes that inherit the vehicle object
        self._world.add_physics_callback(self._stage_prefix + "/update", self.update)

        # Set the flag that signals if the simulation is running or not
        self._sim_running = False

        # Add a callback to start/stop of the simulation once the play/stop button is hit
        self._world.add_timeline_callback(self._stage_prefix + "/start_stop_sim", self.sim_start_stop)

    def __del__(self):
        """
        Method that is invoked when a vehicle object gets destroyed. When this happens, we also invoke the
        'remove_vehicle' from the VehicleManager in order to remove the vehicle from the list of active vehicles.
        """
        # Remove this object from the vehicleHandler
        VehicleManager.get_vehicle_manager().remove_vehicle(self._stage_prefix)

    """
    Properties
    """

    @property
    def state(self):
        """The state of the vehicle.

        Returns:
            State: The current state of the vehicle, i.e., position, orientation, linear and angular velocities...
        """
        return self._state

    @property
    def vehicle_name(self) -> str:
        """Vehicle name.

        Returns:
            Vehicle name (str): last prim name in vehicle prim path
        """
        return self._vehicle_name

    """
    Operations
    """

    def sim_start_stop(self, event):
        """
        Callback that is called every time there is a timeline event such as starting/stoping the simulation.

        Args:
            event: A timeline event generated from Isaac Sim, such as starting or stoping the simulation.
        """
        # If the start/stop button was pressed, then call the start and stop methods accordingly
        if self._world.is_playing() and self._sim_running == False:
            self._sim_running = True
            self.start()

        if self._world.is_stopped() and self._sim_running == True:
            self._sim_running = False
            self.stop()

    def apply_force(self, force, pos=[0.0, 0.0, 0.0], body_part="/body"):
        """
        Method that will apply a force on the rigidbody, on the part specified in the 'body_part' at its relative position
        given by 'pos' (following a FLU) convention.

        Args:
            force (list): A 3-dimensional vector of floats with the force [Fx, Fy, Fz] on the body axis of the vehicle according to a FLU convention.
            pos (list): The relative position (body frame, FLU convention) at which the force is applied. Defaults to [0.0, 0.0, 0.0].
            body_part (str): Suffix of the rigid-body prim path the force is applied to. Defaults to "/body".
        """
        # Get the handle of the rigidbody that we will apply the force to
        rb = self._world.dc_interface.get_rigid_body(self._stage_prefix + body_part)

        # Apply the force to the rigidbody. The force should be expressed in the rigidbody frame
        self._world.dc_interface.apply_body_force(rb, carb._carb.Float3(force), carb._carb.Float3(pos), False)

    def apply_torque(self, torque, body_part="/body"):
        """
        Method that when invoked applies a given torque vector to /<rigid_body_name>/"body" or to /<rigid_body_name>/<body_part>.

        Args:
            torque (list): A 3-dimensional vector of floats with the force [Tx, Ty, Tz] on the body axis of the vehicle according to a FLU convention.
            body_part (str): Suffix of the rigid-body prim path the torque is applied to. Defaults to "/body".
        """
        # Get the handle of the rigidbody that we will apply a torque to
        rb = self._world.dc_interface.get_rigid_body(self._stage_prefix + body_part)

        # Apply the torque to the rigidbody. The torque should be expressed in the rigidbody frame
        self._world.dc_interface.apply_body_torque(rb, carb._carb.Float3(torque), False)

    def update_state(self, dt: float):
        """
        Method that is called at every physics step to retrieve and update the current state of the vehicle, i.e., get
        the current position, orientation, linear and angular velocities and acceleration of the vehicle.

        Args:
            dt (float): The time elapsed between the previous and current function calls (s).
        """
        # Get the body frame interface of the vehicle (this will be the frame used to get the position, orientation, etc.)
        body = self._world.dc_interface.get_rigid_body(self._stage_prefix + "/body")

        # Get the current position and orientation in the inertial frame
        pose = self._world.dc_interface.get_rigid_body_pose(body)

        # Get the attitude according to the convention [w, x, y, z]
        prim = self._world.stage.GetPrimAtPath(self._stage_prefix + "/body")
        rotation_quat = get_world_transform_xform(prim).GetQuaternion()
        rotation_quat_real = rotation_quat.GetReal()
        rotation_quat_img = rotation_quat.GetImaginary()

        # Get the angular velocity of the vehicle expressed in the body frame of reference
        ang_vel = self._world.dc_interface.get_rigid_body_angular_velocity(body)

        # The linear velocity [x_dot, y_dot, z_dot] of the vehicle's body frame expressed in the inertial frame of reference
        linear_vel = self._world.dc_interface.get_rigid_body_linear_velocity(body)

        # Get the linear acceleration of the body relative to the inertial frame, expressed in the inertial frame
        # Note: we must do this approximation, since the Isaac sim does not output the acceleration of the rigid body directly
        # (finite difference of the previous stored velocity; assumes dt > 0)
        linear_acceleration = (np.array(linear_vel) - self._state.linear_velocity) / dt

        # Update the state variable X = [x,y,z]
        self._state.position = np.array(pose.p)

        # Get the quaternion according in the [qx,qy,qz,qw] standard
        self._state.attitude = np.array(
            [rotation_quat_img[0], rotation_quat_img[1], rotation_quat_img[2], rotation_quat_real]
        )

        # Express the velocity of the vehicle in the inertial frame X_dot = [x_dot, y_dot, z_dot]
        self._state.linear_velocity = np.array(linear_vel)

        # The linear velocity V =[u,v,w] of the vehicle's body frame expressed in the body frame of reference
        # Note that: x_dot = Rot * V
        self._state.linear_body_velocity = (
            Rotation.from_quat(self._state.attitude).inv().apply(self._state.linear_velocity)
        )

        # omega = [p,q,r] (angular velocity rotated into the body frame)
        self._state.angular_velocity = Rotation.from_quat(self._state.attitude).inv().apply(np.array(ang_vel))

        # The acceleration of the vehicle expressed in the inertial frame X_ddot = [x_ddot, y_ddot, z_ddot]
        self._state.linear_acceleration = linear_acceleration

    def start(self):
        """
        Method that should be implemented by the class that inherits the vehicle object.
        Called when the simulation starts playing.
        """
        pass

    def stop(self):
        """
        Method that should be implemented by the class that inherits the vehicle object.
        Called when the simulation stops.
        """
        pass

    def update(self, dt: float):
        """
        Method that computes and applies the forces to the vehicle in
        simulation based on the motor speed. This method must be implemented
        by a class that inherits this type and it's called periodically by the physics engine.

        Args:
            dt (float): The time elapsed between the previous and current function calls (s).
        """
        pass
| 11,970 | Python | 41.601423 | 150 | 0.653133 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/vehicles/multirotor.py | """
| File: multirotor.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Definition of the Multirotor class which is used as the base for all the multirotor vehicles.
"""
import numpy as np
# The vehicle interface
from pegasus.simulator.logic.vehicles.vehicle import Vehicle
# Mavlink interface
from pegasus.simulator.logic.backends.mavlink_backend import MavlinkBackend
# Sensors and dynamics setup
from pegasus.simulator.logic.dynamics import LinearDrag
from pegasus.simulator.logic.thrusters import QuadraticThrustCurve
from pegasus.simulator.logic.sensors import Barometer, IMU, Magnetometer, GPS
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
class MultirotorConfig:
    """
    A data class that is used for configuring a Multirotor
    """

    def __init__(self):
        """
        Initialization of the MultirotorConfig class
        """
        # Stage prefix of the vehicle when spawning in the world
        self.stage_prefix = "quadrotor"

        # The USD file that describes the visual aspect of the vehicle (and some properties such as mass and moments of inertia)
        self.usd_file = ""

        # The default thrust curve for a quadrotor and dynamics relating to drag
        self.thrust_curve = QuadraticThrustCurve()
        self.drag = LinearDrag([0.50, 0.30, 0.0])

        # The default sensors for a quadrotor
        self.sensors = [Barometer(), IMU(), Magnetometer(), GPS()]

        # The default graphs attached to the vehicle (none by default)
        self.graphs = []

        # The backends for actually sending commands to the vehicle. By default use mavlink (with default mavlink configurations)
        # [Can be None as well, if we do not desired to use PX4 with this simulated vehicle]. It can also be a ROS2 backend
        # or your own custom Backend implementation!
        self.backends = [MavlinkBackend()]
class Multirotor(Vehicle):
"""Multirotor class - It defines a base interface for creating a multirotor
"""
def __init__(
    self,
    # Simulation specific configurations
    stage_prefix: str = "quadrotor",
    usd_file: str = "",
    vehicle_id: int = 0,
    # Spawning pose of the vehicle
    init_pos=[0.0, 0.0, 0.07],
    init_orientation=[0.0, 0.0, 0.0, 1.0],
    config=MultirotorConfig(),
):
    """Initializes the multirotor object

    Args:
        stage_prefix (str): The name the vehicle will present in the simulator when spawned. Defaults to "quadrotor".
        usd_file (str): The USD file that describes the looks and shape of the vehicle. Defaults to "".
        vehicle_id (int): The id to be used for the vehicle. Defaults to 0.
        init_pos (list): The initial position of the vehicle in the inertial frame (in ENU convention). Defaults to [0.0, 0.0, 0.07].
        init_orientation (list): The initial orientation of the vehicle in quaternion [qx, qy, qz, qw]. Defaults to [0.0, 0.0, 0.0, 1.0].
        config (MultirotorConfig): Sensor/dynamics/backend configuration. Defaults to MultirotorConfig().
            NOTE(review): this default instance is created once at import time and shared across calls.
    """
    # 1. Initiate the Vehicle object itself
    super().__init__(stage_prefix, usd_file, init_pos, init_orientation)

    # 2. Initialize all the vehicle sensors
    self._sensors = config.sensors
    for sensor in self._sensors:
        sensor.initialize(PegasusInterface().latitude, PegasusInterface().longitude, PegasusInterface().altitude)

    # Add callbacks to the physics engine to update each sensor at every timestep
    # and let the sensor decide depending on its internal update rate whether to generate new data
    self._world.add_physics_callback(self._stage_prefix + "/Sensors", self.update_sensors)

    # 3. Initialize all the vehicle graphs
    self._graphs = config.graphs
    for graph in self._graphs:
        graph.initialize(self)

    # 4. Setup the dynamics of the system
    # Get the thrust curve of the vehicle from the configuration
    self._thrusters = config.thrust_curve
    self._drag = config.drag

    # 5. Save the backend interface (if given in the configuration of the multirotor)
    # and initialize them
    self._backends = config.backends
    for backend in self._backends:
        backend.initialize(self)

    # Add a callback so that the current vehicle state is pushed to every backend at each physics step
    self._world.add_physics_callback(self._stage_prefix + "/mav_state", self.update_sim_state)
def update_sensors(self, dt: float):
    """Physics-step callback: refresh every sensor and forward any new measurement to all backends.

    Each sensor decides internally (based on its own update rate) whether to produce new data
    for this step. Whenever a sensor returns a measurement, it is handed to every configured
    backend (e.g. mavlink, ROS) via backend.update_sensor so it can later be transmitted.

    Args:
        dt (float): The time elapsed between the previous and current function calls (s).
    """
    for device in self._sensors:
        measurement = device.update(self._state, dt)

        if measurement is None:
            continue

        # New data was produced - push it to every backend
        for backend in self._backends:
            backend.update_sensor(device.sensor_type, measurement)
def update_sim_state(self, dt: float):
    """Physics-step callback: push the vehicle's current state to every control backend.

    Args:
        dt (float): Time elapsed between the previous and current function calls (s).
            Unused here, but required by the physics-callback signature.
    """
    for sink in self._backends:
        sink.update_state(self._state)
def start(self):
    """Open communication with every backend.

    Invoked automatically when the simulation starts.
    """
    for sink in self._backends:
        sink.start()
def stop(self):
    """Signal every backend that the simulation has stopped.

    Invoked automatically when the simulation stops.
    """
    for sink in self._backends:
        sink.stop()
def update(self, dt: float):
    """
    Physics-step callback that computes and applies the forces/torques to the vehicle
    in simulation based on the commanded rotor speeds.

    Args:
        dt (float): The time elapsed between the previous and current function calls (s).
    """
    # Get the articulation root of the vehicle
    articulation = self._world.dc_interface.get_articulation(self._stage_prefix)
    # Desired angular velocity [rad/s] for each rotor, taken from the first backend
    # (mavlink or other) when one exists; otherwise command zero speed on every rotor
    if len(self._backends) != 0:
        desired_rotor_velocities = self._backends[0].input_reference()
    else:
        desired_rotor_velocities = [0.0 for i in range(self._thrusters._num_rotors)]
    # Input the desired rotor velocities in the thruster model
    self._thrusters.set_input_reference(desired_rotor_velocities)
    # Get the desired forces to apply to the vehicle
    forces_z, _, rolling_moment = self._thrusters.update(self._state, dt)
    # Apply the thrust on every rotor link. NOTE: this was hard-coded to `range(4)`;
    # it now uses the thruster model's rotor count so non-quad vehicles work too,
    # consistent with the rest of this class which already reads _num_rotors.
    for i in range(self._thrusters._num_rotors):
        # Apply the force in Z on the rotor frame
        self.apply_force([0.0, 0.0, forces_z[i]], body_part="/rotor" + str(i))
        # Generate the rotating propeller visual effect
        self.handle_propeller_visual(i, forces_z[i], articulation)
    # Apply the torque to the body frame of the vehicle that corresponds to the rolling moment
    self.apply_torque([0.0, 0.0, rolling_moment], "/body")
    # Compute the total linear drag force to apply to the vehicle's body frame
    drag = self._drag.update(self._state, dt)
    self.apply_force(drag, body_part="/body")
    # Call the update methods in all backends
    for backend in self._backends:
        backend.update(dt)
def handle_propeller_visual(self, rotor_number, force: float, articulation):
    """
    Drive the rotor joint velocity, purely to animate a spinning propeller.

    Args:
        rotor_number (int): Index of the rotor to animate.
        force (float): Thrust currently being applied on that rotor.
        articulation: The articulation group the rotor joints belong to.
    """
    dc = self._world.dc_interface
    joint = dc.find_articulation_dof(articulation, "joint" + str(rotor_number))
    direction = self._thrusters.rot_dir[rotor_number]
    if force >= 0.1:
        # Armed and producing thrust: spin fast
        spin = 100 * direction
    elif force > 0.0:
        # Armed but idling: spin slowly
        spin = 5 * direction
    else:
        # Not armed: stop the joint
        spin = 0
    dc.set_dof_velocity(joint, spin)
def force_and_torques_to_velocities(self, force: float, torque: np.ndarray):
    """
    Compute the target angular velocity for each rotor that realizes a desired total
    thrust [N] and body-frame torque [Nm].

    Note: This method assumes a quadratic thrust curve (thrust proportional to the
    square of rotor speed). A general thrust allocation scheme may replace this in a
    future update; for now it is made to work with multirotors directly.

    Args:
        force (float): Total desired thrust along the body Z axis [N].
        torque (np.ndarray): Desired torque vector in the body frame [Nm]
            as (tau_x, tau_y, tau_z).

    Returns:
        list: Angular velocities [rad/s] to apply to each rotor to accomplish
        the requested force and torques.
    """
    # Rigid body of the vehicle's base link
    rb = self._world.dc_interface.get_rigid_body(self._stage_prefix + "/body")
    # Rigid bodies of every rotor link
    rotors = [self._world.dc_interface.get_rigid_body(self._stage_prefix + "/rotor" + str(i)) for i in range(self._thrusters._num_rotors)]
    # Rotor positions relative to the body frame (orientation is ignored here)
    relative_poses = self._world.dc_interface.get_relative_body_poses(rb, rotors)
    # Allocation matrix mapping squared rotor speeds -> [T, tau_x, tau_y, tau_z]
    aloc_matrix = np.zeros((4, self._thrusters._num_rotors))
    # First row: total thrust T [N]
    aloc_matrix[0, :] = np.array(self._thrusters._rotor_constant)
    # Second and third rows: tau_x and tau_y from each rotor's lever arm
    aloc_matrix[1, :] = np.array([relative_poses[i].p[1] * self._thrusters._rotor_constant[i] for i in range(self._thrusters._num_rotors)])
    aloc_matrix[2, :] = np.array([-relative_poses[i].p[0] * self._thrusters._rotor_constant[i] for i in range(self._thrusters._num_rotors)])
    # Fourth row: yaw torque tau_z from rotor drag.
    # NOTE(review): this reads `_rot_dir` while handle_propeller_visual reads `rot_dir`
    # on the same thrust model — presumably both attributes exist; confirm the name.
    aloc_matrix[3, :] = np.array([self._thrusters._rolling_moment_coefficient[i] * self._thrusters._rot_dir[i] for i in range(self._thrusters._num_rotors)])
    # Pseudo-inverse of the allocation matrix yields squared angular velocities
    # from the commanded wrench
    aloc_inv = np.linalg.pinv(aloc_matrix)
    squared_ang_vel = aloc_inv @ np.array([force, torque[0], torque[1], torque[2]])
    # Clamp negatives: a squared angular velocity cannot be below zero
    squared_ang_vel[squared_ang_vel < 0] = 0.0
    # ------------------------------------------------------------------------------------------------
    # Saturate the inputs while preserving their relation to each other, by performing a normalization
    # ------------------------------------------------------------------------------------------------
    max_thrust_vel_squared = np.power(self._thrusters.max_rotor_velocity[0], 2)
    max_val = np.max(squared_ang_vel)
    if max_val >= max_thrust_vel_squared:
        normalize = np.maximum(max_val / max_thrust_vel_squared, 1.0)
        squared_ang_vel = squared_ang_vel / normalize
    # Angular velocity for each rotor in [rad/s]
    ang_vel = np.sqrt(squared_ang_vel)
    return ang_vel
| 13,252 | Python | 45.501754 | 160 | 0.644582 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/vehicles/multirotors/iris.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from pegasus.simulator.logic.vehicles.multirotor import Multirotor, MultirotorConfig
# Sensors and dynamics setup
from pegasus.simulator.logic.dynamics import LinearDrag
from pegasus.simulator.logic.thrusters import QuadraticThrustCurve
from pegasus.simulator.logic.sensors import Barometer, IMU, Magnetometer, GPS
# Mavlink interface
from pegasus.simulator.logic.backends.mavlink_backend import MavlinkBackend
# Get the location of the IRIS asset
from pegasus.simulator.params import ROBOTS
class IrisConfig(MultirotorConfig):
    """Default configuration for the Iris quadrotor: stage prefix, USD asset, thrust
    curve, drag model, sensor suite and control backends."""

    def __init__(self):
        # Stage prefix of the vehicle when spawning in the world
        self.stage_prefix = "quadrotor"
        # The USD file that describes the visual aspect of the vehicle (and some properties such as mass and moments of inertia)
        self.usd_file = ROBOTS["Iris"]
        # The default thrust curve for a quadrotor and dynamics relating to drag
        self.thrust_curve = QuadraticThrustCurve()
        self.drag = LinearDrag([0.50, 0.30, 0.0])
        # The default sensors for a quadrotor
        self.sensors = [Barometer(), IMU(), Magnetometer(), GPS()]
        # The backends for actually sending commands to the vehicle. By default use mavlink (with default mavlink configurations)
        # [Can be None as well, if we do not desire to use PX4 with this simulated vehicle]. It can also be a ROS2 backend
        # or your own custom Backend implementation!
        self.backends = [MavlinkBackend()]
class Iris(Multirotor):
    """The Iris quadrotor: a thin convenience wrapper around Multirotor using IrisConfig defaults."""

    def __init__(self, id: int, world, init_pos=None, init_orientation=None, config=None):
        """
        Args:
            id (int): Identifier of the vehicle.
            world: The simulation world the vehicle is spawned into.
            init_pos (list[float] | None): Initial [x, y, z] position. Defaults to [0.0, 0.0, 0.07].
            init_orientation (list[float] | None): Initial orientation quaternion
                [qx, qy, qz, qw]. Defaults to [0.0, 0.0, 0.0, 1.0].
            config (IrisConfig | None): Vehicle configuration. Defaults to a fresh IrisConfig.

        Fixes over the original: a misplaced bracket had put `init_orientation=` inside the
        `init_pos` list literal (a SyntaxError), `super.__init__` was missing the call
        parentheses, and the defaults were mutable objects shared across instances.
        """
        # Use None sentinels to avoid mutable default arguments (and a single
        # IrisConfig instance shared by every Iris ever constructed)
        if init_pos is None:
            init_pos = [0.0, 0.0, 0.07]
        if init_orientation is None:
            init_orientation = [0.0, 0.0, 0.0, 1.0]
        if config is None:
            config = IrisConfig()
        super().__init__(config.stage_prefix, config.usd_file, id, world, init_pos, init_orientation, config=config)
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/dynamics/drag.py | """
| File: drag.py
| Author: Marcelo Jacinto ([email protected])
| Description: Base interface used to implement forces that should actuate on a rigidbody such as linear drag
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
from pegasus.simulator.logic.state import State
class Drag:
    """
    Base template for drag models that act on a rigid body.

    Concrete models (e.g. LinearDrag) override :meth:`update` and :attr:`drag`
    to produce a non-zero force; this base implementation always reports zero drag.
    """

    def __init__(self):
        """
        Base constructor. Concrete drag models receive their drag coefficients
        (a 3x1 vector of constants) here.
        """

    @property
    def drag(self):
        """
        The drag force to be applied on the body frame of the vehicle.

        Returns:
            list: A list with len==3 with the drag force for the FLU body reference
            frame, expressed in Newton (N) as [dx, dy, dz].
        """
        return [0.0, 0.0, 0.0]

    def update(self, state: State, dt: float):
        """
        Recompute the drag force to apply on the body frame of the vehicle.

        Args:
            state (State): The current state of the vehicle.
            dt (float): The time elapsed between the previous and current function calls (s).

        Returns:
            list: A list with len==3 with the drag force for the FLU body reference
            frame. Always zero in this base class.
        """
        return [0.0, 0.0, 0.0]
| 1,481 | Python | 36.049999 | 128 | 0.649561 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/logic/dynamics/linear_drag.py | """
| File: linear_drag.py
| Author: Marcelo Jacinto ([email protected])
| Description: Computes the forces that should actuate on a rigidbody affected by linear drag
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
import numpy as np
from pegasus.simulator.logic.dynamics.drag import Drag
from pegasus.simulator.logic.state import State
class LinearDrag(Drag):
    """
    Linear (velocity-proportional) drag model acting on a rigid body.

    The force applied in the FLU body frame is -diag(dx, dy, dz) @ v_body, where
    v_body is the vehicle velocity expressed in its own body frame
    (i.e. R' * v for an inertial-frame velocity v).
    """

    def __init__(self, drag_coefficients=[0.0, 0.0, 0.0]):
        """
        Args:
            drag_coefficients (list[float]): Constant linear drag coefficients
                [dx, dy, dz] used to compute the total drag force on the body.
        """
        super().__init__()
        # Diagonal drag matrix for the vehicle's body frame
        self._drag_coefficients = np.diag(drag_coefficients)
        # Most recently computed drag force (body frame)
        self._drag_force = np.array([0.0, 0.0, 0.0])

    @property
    def drag(self):
        """
        The drag force to be applied on the body frame of the vehicle.

        Returns:
            list: A list with len==3 with the drag force for the FLU body reference
            frame, expressed in Newton (N) as [dx, dy, dz].
        """
        return self._drag_force

    def update(self, state: State, dt: float):
        """
        Recompute the body-frame drag force from the current vehicle state.

        Args:
            state (State): The current state of the vehicle (provides the
                body-frame velocity).
            dt (float): The time elapsed between the previous and current function
                calls (s); unused by this model.

        Returns:
            list: A list with len==3 with the drag force for the FLU body reference frame.
        """
        velocity_body = state.linear_body_velocity
        self._drag_force = -np.dot(self._drag_coefficients, velocity_body)
        return self._drag_force
| 2,762 | Python | 42.171874 | 128 | 0.663287 |
PegasusSimulator/PegasusSimulator/extensions/pegasus.simulator/pegasus/simulator/tests/test_hello_world.py | # NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
# Extension for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import pegasus.simulator
# Having a test class derived from omni.kit.test.AsyncTestCase declared at the root of the module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
    """Unit/UI tests for the pegasus.simulator extension, auto-discovered by omni.kit.test."""

    # Before running each test
    async def setUp(self):
        pass

    # After running each test
    async def tearDown(self):
        pass

    # Actual test, notice it is an "async" function, so "await" can be used if needed
    async def test_hello_public_function(self):
        # The public helper is expected to map 4 -> 256
        result = pegasus.simulator.some_public_function(4)
        self.assertEqual(result, 256)

    async def test_window_button(self):
        # Find a label in our window
        label = ui_test.find("My Window//Frame/**/Label[*]")
        # Find buttons in our window
        add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
        reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")
        # Click reset button, then check the counter label resets and increments per click
        await reset_button.click()
        self.assertEqual(label.widget.text, "empty")
        await add_button.click()
        self.assertEqual(label.widget.text, "count: 1")
        await add_button.click()
        self.assertEqual(label.widget.text, "count: 2")
| 1,669 | Python | 35.304347 | 142 | 0.683044 |
PegasusSimulator/PegasusSimulator/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# This section is responsible for auto-generating the API documentation
# Make the extension sources importable so Sphinx autodoc can find the modules.
import os
import sys

_EXT_ROOT = "../extensions/pegasus.simulator"
for _rel in (_EXT_ROOT, _EXT_ROOT + "/pegasus/simulator"):
    sys.path.insert(0, os.path.abspath(_rel))
# -- Project information -----------------------------------------------------
project = "Pegasus Simulator"
copyright = "2023, Marcelo Jacinto"
author = "Marcelo Jacinto"
version = "1.0.0"

# -- General configuration ---------------------------------------------------
# Sphinx extension modules loaded for this build.
# NOTE: "myst_parser" was listed twice in the original config; it is included once here.
extensions = [
    "sphinx.ext.duration",
    "sphinx.ext.doctest",
    "sphinx.ext.autodoc",
    "autodocsumm",
    "sphinx.ext.napoleon",
    #"sphinx.ext.autosummary",
    "sphinx.ext.intersphinx",
    "sphinx.ext.mathjax",
    "sphinxcontrib.bibtex",
    "sphinx.ext.todo",
    "sphinx.ext.githubpages",
    "sphinx.ext.autosectionlabel",
    "sphinxcontrib.youtube",
    "myst_parser",
]
# Map intersphinx targets so cross-references can link into external documentation.
intersphinx_mapping = {
    "rtd": ("https://docs.readthedocs.io/en/stable/", None),
    "python": ("https://docs.python.org/3/", None),
    "sphinx": ("https://www.sphinx-doc.org/en/master/", None),
}
# mathjax hacks: use LaTeX-style \( \) and \[ \] delimiters for inline/display math
mathjax3_config = {
    "tex": {
        "inlineMath": [["\\(", "\\)"]],
        "displayMath": [["\\[", "\\]"]],
    },
}
intersphinx_disabled_domains = ["std"]
# supported file extensions for source files
#source_suffix = {
#    ".rst": "restructuredtext"
#}
templates_path = ["_templates"]
suppress_warnings = ["myst.header", "autosectionlabel.*"]
# -- Options for EPUB output
epub_show_urls = "footnote"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "README.md", "licenses/*"]
# --- Automatic API documentation generation
# put type hints inside the description instead of the signature (easier to read)
autodoc_typehints = "description"
autodoc_typehints_description_target = "documented"
# use only the class docstring (not __init__'s) for the generated class body
autoclass_content = "class"
# separate class docstring from __init__ docstring
autodoc_class_signature = "separated"
# group members by type (methods, attributes, ...) rather than source order
autodoc_member_order = "groupwise"
# default autodoc settings
autodoc_default_options = {
    "autosummary": True,
}
# BibTeX configuration
bibtex_bibfiles = ["bibliography.bib"]
# Generate documentation for __special__ methods
napoleon_include_special_with_doc = True
# Mock out modules that are not available on RTD, so autodoc can import the package
autodoc_mock_imports = [
    "np",
    "torch",
    "numpy",
    "scipy",
    "carb",
    "pxr",
    "omni",
    "omni.kit",
    "omni.usd",
    "omni.isaac.core.utils.nucleus",
    "omni.client",
    "pxr.PhysxSchema",
    "pxr.PhysicsSchemaTools",
    "omni.replicator",
    "omni.isaac.core",
    "omni.isaac.kit",
    "omni.isaac.cloner",
    "gym",
    "stable_baselines3",
    "rsl_rl",
    "rl_games",
    "ray",
    "h5py",
    "hid",
    "prettytable",
    "pyyaml",
    "pymavlink",
    "rclpy",
    "std_msgs",
    "sensor_msgs",
    "geometry_msgs"
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_logo = "_static/logo.png"
html_theme_options = {
    'logo_only': True,
    'display_version': False,
    'style_nav_header_background': '#FFD700'
}
html_show_copyright = True
html_show_sphinx = False
# The master toctree document.
master_doc = "index"
| 4,372 | Python | 26.853503 | 88 | 0.645014 |
Conv-AI/ov_extension/exts/convai/convai/extension.py | import math, os
import asyncio
import numpy as np
import omni.ext
import carb.events
import omni.ui as ui
import configparser
import pyaudio
import grpc
from .rpc import service_pb2 as convai_service_msg
from .rpc import service_pb2_grpc as convai_service
from .convai_audio_player import ConvaiAudioPlayer
from typing import Generator
import io
from pydub import AudioSegment
import threading
import traceback
import time
from collections import deque
import random
from functools import partial
# Absolute directory of this script (used to locate the convai.env config file)
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
# Microphone capture settings
CHUNK = 1024  # frames read from the mic per buffer
FORMAT = pyaudio.paInt16  # 16-bit signed samples
CHANNELS = 1  # mono capture
RATE = 12000  # sample rate in Hz — presumably what the Convai service expects; confirm
def log(text: str, warning: bool = False):
    """Print a message prefixed with the extension tag, optionally flagged as a warning."""
    tag = "[Warning]" if warning else ""
    print(f"[convai] {tag} {text}")
class ConvaiExtension(omni.ext.IExt):
    """Omniverse extension wiring a microphone and a small UI to the Convai gRPC service."""

    WINDOW_NAME = "Convai"
    MENU_PATH = f"Window/{WINDOW_NAME}"

    def on_startup(self, ext_id: str):
        """Extension entry point: initialize runtime state, UI, menu and the gRPC channel."""
        # Runtime state for audio capture and the gRPC session
        self.IsCapturingAudio = False
        self.on_new_frame_sub = None
        self.channel_address = None
        self.channel = None
        self.SessionID = None
        self.channelState = grpc.ChannelConnectivity.IDLE
        self.client = None
        self.ConvaiGRPCGetResponseProxy = None
        self.PyAudio = pyaudio.PyAudio()
        self.stream = None
        self.Tick = False
        self.TickThread = None
        self.ConvaiAudioPlayer = ConvaiAudioPlayer(self._on_start_talk_callback, self._on_stop_talk_callback)
        self.LastReadyTranscription = ""
        self.ResponseTextBuffer = ""
        self.OldCharacterID = ""
        # UI state strings, mirrored into the widgets by _on_UI_update_event
        self.response_UI_Label_text = ""
        self.action_UI_Label_text = "<Action>"
        self.transcription_UI_Label_text = ""
        # self.response_UI_Label_text = "<Response will apear here>"
        self.response_UI_Label_text = ""  # Turn off response text due to unknown crash
        self.StartTalking_Btn_text = "Start Talking"
        self.StartTalking_Btn_state = True
        # Locks guarding the UI state strings and the mic stream across threads
        self.UI_Lock = threading.Lock()
        self.Mic_Lock = threading.Lock()
        self.UI_update_counter = 0
        self.on_new_update_sub = None
        ui.Workspace.set_show_window_fn(ConvaiExtension.WINDOW_NAME, partial(self.show_window, None))
        ui.Workspace.show_window(ConvaiExtension.WINDOW_NAME)
        # # Put the new menu
        editor_menu = omni.kit.ui.get_editor_menu()
        if editor_menu:
            self._menu = editor_menu.add_item(
                ConvaiExtension.MENU_PATH, self.show_window, toggle=True, value=True
            )
        # self.show_window(None, True)
        self.read_channel_address_from_config()
        self.create_channel()
        log("ConvaiExtension started")
def setup_UI(self):
    """Build the extension window: API-key/character-ID fields, action label, talk button
    and the transcription label; subscribe once to the app update stream."""
    self._window = ui.Window(ConvaiExtension.WINDOW_NAME, width=300, height=300)
    self._window.set_visibility_changed_fn(self._visiblity_changed_fn)
    with self._window.frame:
        with ui.VStack():
            with ui.HStack(height = ui.Length(30)):
                l = ui.Label("Convai API key")
                self.APIKey_input_UI = ui.StringField()
            ui.Spacer(height=5)
            with ui.HStack(height = ui.Length(30)):
                l = ui.Label("Character ID")
                self.CharID_input_UI = ui.StringField()
            ui.Spacer(height=5)
            # with ui.HStack(height = ui.Length(30)):
            #     l = ui.Label("Session(Leave empty for 1st time)")
            #     self.session_input_UI = ui.StringField()
            # ui.Spacer(height=5)
            with ui.HStack(height = ui.Length(30)):
                l = ui.Label("Comma seperated actions")
                self.actions_input_UI = ui.StringField()
                self.actions_input_UI.set_tooltip("e.g. Dances, Jumps")
            ui.Spacer(height=5)
            # self.response_UI_Label = ui.Label("", height = ui.Length(60), word_wrap = True)
            # self.response_UI_Label.alignment = ui.Alignment.CENTER
            self.action_UI_Label = ui.Label("<Action>", height = ui.Length(30), word_wrap = False)
            self.action_UI_Label.alignment = ui.Alignment.CENTER
            ui.Spacer(height=5)
            self.StartTalking_Btn = ui.Button("Start Talking", clicked_fn=lambda: self.on_start_talking_btn_click(), height = ui.Length(30))
            self.transcription_UI_Label = ui.Label("", height = ui.Length(60), word_wrap = True)
            self.transcription_UI_Label.alignment = ui.Alignment.CENTER
    if self.on_new_update_sub is None:
        # Subscribe once so widget text tracks the thread-shared state strings
        self.on_new_update_sub = (
            omni.kit.app.get_app()
            .get_update_event_stream()
            .create_subscription_to_pop(self._on_UI_update_event, name="convai new UI update")
        )
    self.read_UI_from_config()
    return self._window
def _on_UI_update_event(self, e):
    """App-update callback: copy the thread-shared state strings into the UI widgets."""
    # Keep the counter bounded; it only exists for periodic bookkeeping
    if self.UI_update_counter > 1000:
        self.UI_update_counter = 0
    self.UI_update_counter += 1
    if self._window is None:
        return
    # Skip this frame rather than block the UI thread when another thread holds the lock
    if self.UI_Lock.locked():
        log("UI_Lock is locked", 1)
        return
    with self.UI_Lock:
        # self.response_UI_Label.text = str(self.response_UI_Label_text)
        self.action_UI_Label.text = str(self.action_UI_Label_text)
        self.transcription_UI_Label.text = str(self.transcription_UI_Label_text)
        self.StartTalking_Btn.text = self.StartTalking_Btn_text
        self.StartTalking_Btn.enabled = self.StartTalking_Btn_state

def start_tick(self):
    """Start the background tick thread that pumps mic audio into the gRPC stream."""
    if self.Tick:
        log("Tick already started", 1)
        return
    self.Tick = True
    self.TickThread = threading.Thread(target=self._on_tick)
    self.TickThread.start()

def stop_tick(self):
    """Stop the background tick thread and wait for it to exit."""
    if self.TickThread and self.Tick:
        self.Tick = False
        self.TickThread.join()
def read_channel_address_from_config(self):
    """Read the gRPC channel address from convai.env (section CONVAI, key CHANNEL)."""
    config = configparser.ConfigParser()
    config.read(os.path.join(__location__, 'convai.env'))
    self.channel_address = config.get("CONVAI", "CHANNEL")

def read_UI_from_config(self):
    """Populate the API key, character ID and actions fields from convai.env."""
    config = configparser.ConfigParser()
    config.read(os.path.join(__location__, 'convai.env'))
    api_key = config.get("CONVAI", "API_KEY")
    self.APIKey_input_UI.model.set_value(api_key)
    character_id = config.get("CONVAI", "CHARACTER_ID")
    self.CharID_input_UI.model.set_value(character_id)
    actions_text = config.get("CONVAI", "ACTIONS")
    self.actions_input_UI.model.set_value(actions_text)

def save_config(self):
    """Persist the current UI field values back into convai.env."""
    config = configparser.ConfigParser()
    config.read(os.path.join(__location__, 'convai.env'))
    config.set("CONVAI", "API_KEY", self.APIKey_input_UI.model.get_value_as_string())
    config.set("CONVAI", "CHARACTER_ID", self.CharID_input_UI.model.get_value_as_string())
    config.set("CONVAI", "ACTIONS", self.actions_input_UI.model.get_value_as_string())
    # config.set("CONVAI", "CHANNEL", self.channel_address)
    with open(os.path.join(__location__, 'convai.env'), 'w') as file:
        config.write(file)
def create_channel(self):
    """Create the secure gRPC channel to the Convai service (no-op if one already exists)."""
    if (self.channel):
        log("gRPC channel already created")
        return
    self.channel = grpc.secure_channel(self.channel_address, grpc.ssl_channel_credentials())
    # self.channel.subscribe(self.on_channel_state_change, True)
    log("Created gRPC channel")

def close_channel(self):
    """Close and drop the gRPC channel if one is open."""
    if (self.channel):
        self.channel.close()
        self.channel = None
        log("close_channel - Closed gRPC channel")
    else:
        log("close_channel - gRPC channel already closed")
def on_start_talking_btn_click(self):
    """Toggle handler for the talk button: if recording, flush the last mic chunk and stop;
    otherwise reset session state as needed, start capturing and open a gRPC stream."""
    if (self.IsCapturingAudio):
        # Change UI
        with self.UI_Lock:
            self.StartTalking_Btn_text = "Processing..."
            # self.StartTalking_Btn_text = "Start Talking"
            self.StartTalking_Btn_state = False
            # Reset response UI text
            self.response_UI_Label_text = ""
        # Do one last mic read
        self.read_mic_and_send_to_grpc(True)
        # time.sleep(0.01)
        # Stop Mic
        self.stop_mic()
    else:
        # Reset Session ID if Character ID changes
        if self.OldCharacterID != self.CharID_input_UI.model.get_value_as_string():
            self.OldCharacterID = self.CharID_input_UI.model.get_value_as_string()
            self.SessionID = ""
        with self.UI_Lock:
            # Reset transcription UI text
            self.transcription_UI_Label_text = ""
            self.LastReadyTranscription = ""
            # Change Btn text
            self.StartTalking_Btn_text = "Stop"
        # Open Mic stream
        self.start_mic()
        # Stop any on-going audio
        self.ConvaiAudioPlayer.stop()
        # Save API key, character ID and session ID
        self.save_config()
        # Create gRPC stream
        self.ConvaiGRPCGetResponseProxy = ConvaiGRPCGetResponseProxy(self)
def on_shutdown(self):
    """Extension teardown: release the gRPC stream/channel, tick thread, menu and window."""
    self.clean_grpc_stream()
    self.close_channel()
    self.stop_tick()
    if self._menu:
        self._menu = None
    if self._window:
        self._window.destroy()
        self._window = None
    # Deregister the function that shows the window from omni.ui
    ui.Workspace.set_show_window_fn(ConvaiExtension.WINDOW_NAME, None)
    log("ConvaiExtension shutdown")
def start_mic(self):
    """Open the PyAudio input stream and start the tick thread that forwards chunks to gRPC."""
    if self.IsCapturingAudio == True:
        log("start_mic - mic is already capturing audio", 1)
        return
    self.stream = self.PyAudio.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    self.IsCapturingAudio = True
    self.start_tick()
    log("start_mic - Started Recording")

def stop_mic(self):
    """Stop the tick thread, then stop and close the PyAudio input stream."""
    if self.IsCapturingAudio == False:
        log("stop_mic - mic has not started yet", 1)
        return
    self.stop_tick()
    if self.stream:
        self.stream.stop_stream()
        self.stream.close()
    else:
        log("stop_mic - could not close mic stream since it is None", 1)
    self.IsCapturingAudio = False
    log("stop_mic - Stopped Recording")

def clean_grpc_stream(self):
    """Detach and drop the current gRPC response proxy (the channel itself stays open)."""
    if self.ConvaiGRPCGetResponseProxy:
        self.ConvaiGRPCGetResponseProxy.Parent = None
        del self.ConvaiGRPCGetResponseProxy
    self.ConvaiGRPCGetResponseProxy = None
    # self.close_channel()
def on_transcription_received(self, Transcription: str, IsTranscriptionReady: bool, IsFinal: bool):
    '''
    Called when user transcription is received from the service. Mirrors the partial
    transcription into the UI and, once a segment is marked ready, folds it into the
    accumulated transcription text.
    '''
    self.UI_Lock.acquire()
    self.transcription_UI_Label_text = self.LastReadyTranscription + " " + Transcription
    self.UI_Lock.release()
    if IsTranscriptionReady:
        self.LastReadyTranscription = self.LastReadyTranscription + " " + Transcription

def on_data_received(self, ReceivedText: str, ReceivedAudio: bytes, SampleRate: int, IsFinal: bool):
    '''
    Called when new response text and/or audio data is received. Text is buffered until
    the response is final, then published to the UI; audio is always queued for playback.
    '''
    self.ResponseTextBuffer += str(ReceivedText)
    if IsFinal:
        with self.UI_Lock:
            self.response_UI_Label_text = self.ResponseTextBuffer
            self.transcription_UI_Label_text = self.ResponseTextBuffer
            self.ResponseTextBuffer = ""
    self.ConvaiAudioPlayer.append_to_stream(ReceivedAudio)
    return
def on_actions_received(self, Action: str):
    '''
    Called when an action is received from the service: match it against the configured
    action list, update the action label, and fire the corresponding stage event.

    Fix: the lock was previously acquired and released manually, so an exception raised
    inside the loop (e.g. from fire_event) would leave UI_Lock held forever and freeze
    all further UI updates. A `with` block guarantees the lock is released.
    '''
    # Action.replace(".", "")
    with self.UI_Lock:
        for InputAction in self.parse_actions():
            # The service may return extra text around the action name, so substring-match
            if Action.find(InputAction) >= 0:
                self.action_UI_Label_text = InputAction
                self.fire_event(InputAction)
                return
        # No configured action matched
        self.action_UI_Label_text = "None"
def on_session_ID_received(self, SessionID: str):
    '''
    Called when a new SessionID is received; stored so the conversation can continue.
    '''
    self.SessionID = SessionID

def on_finish(self):
    '''
    Called when the response stream is done; re-enables the talk button and drops the stream.
    '''
    self.ConvaiGRPCGetResponseProxy = None
    with self.UI_Lock:
        self.StartTalking_Btn_text = "Start Talking"
        self.StartTalking_Btn_state = True
    self.clean_grpc_stream()
    log("Received on_finish")

def on_failure(self, ErrorMessage: str):
    '''
    Called when there is an unsuccessful response; surfaces an error hint in the UI,
    stops audio capture and tears the stream down via on_finish().
    '''
    log(f"on_failure called with message: {ErrorMessage}", 1)
    with self.UI_Lock:
        self.transcription_UI_Label_text = "ERROR: Please double check API key and the character ID - Send logs to [email protected] for further assistance."
    self.stop_mic()
    self.on_finish()
def _on_tick(self):
    """Background loop: while ticking, forward a mic chunk to the gRPC stream every ~100 ms."""
    while self.Tick:
        time.sleep(0.1)
        if self.IsCapturingAudio == False or self.ConvaiGRPCGetResponseProxy is None:
            continue
        self.read_mic_and_send_to_grpc(False)

def _on_start_talk_callback(self):
    """Audio-player hook: character audio started — emit the 'start' stage event."""
    self.fire_event("start")
    log("Character Started Talking")

def _on_stop_talk_callback(self):
    """Audio-player hook: character audio finished — emit the 'stop' stage event."""
    self.fire_event("stop")
    log("Character Stopped Talking")
def read_mic_and_send_to_grpc(self, LastWrite):
    """Read one CHUNK from the mic (under Mic_Lock) and hand it to the gRPC proxy.

    Args:
        LastWrite: True when this is the final chunk of the utterance.
    """
    with self.Mic_Lock:
        if self.stream:
            data = self.stream.read(CHUNK)
        else:
            log("read_mic_and_send_to_grpc - could not read mic stream since it is none", 1)
            data = bytes()
        if self.ConvaiGRPCGetResponseProxy:
            self.ConvaiGRPCGetResponseProxy.write_audio_data_to_send(data, LastWrite)
        else:
            log("read_mic_and_send_to_grpc - ConvaiGRPCGetResponseProxy is not valid", 1)

def fire_event(self, event_name):
    """Push a custom omni.graph.action event onto the app message bus."""
    def registered_event_name(event_name):
        """Returns the internal name used for the given custom event name"""
        n = "omni.graph.action." + event_name
        return carb.events.type_from_string(n)
    reg_event_name = registered_event_name(event_name)
    message_bus = omni.kit.app.get_app().get_message_bus_event_stream()
    message_bus.push(reg_event_name, payload={})
def parse_actions(self):
    """Return the configured action names with "None" prepended, trimmed of surrounding spaces."""
    raw = self.actions_input_UI.model.get_value_as_string()
    return ["None"] + [token.strip(" ") for token in raw.split(',')]
def show_window(self, menu, value):
    """Menu/workspace callback: build the window when `value` is True, otherwise hide it."""
    # with self.UI_Lock:
    if value:
        self.setup_UI()
        self._window.set_visibility_changed_fn(self._visiblity_changed_fn)
    else:
        if self._window:
            self._window.visible = False

def _visiblity_changed_fn(self, visible):
    # with self.UI_Lock:
    # Called when the user pressed "X"
    self._set_menu(visible)
    if not visible:
        # Destroy the window, since we are creating new window
        # in show_window
        asyncio.ensure_future(self._destroy_window_async())

def _set_menu(self, value):
    """Set the menu to create this window on and off"""
    editor_menu = omni.kit.ui.get_editor_menu()
    if editor_menu:
        editor_menu.set_value(ConvaiExtension.MENU_PATH, value)

async def _destroy_window_async(self):
    # with self.UI_Lock:
    # wait one frame, this is due to the one frame defer
    # in Window::_moveToMainOSWindow()
    await omni.kit.app.get_app().next_update_async()
    if self._window:
        self._window.destroy()
        self._window = None
class ConvaiGRPCGetResponseProxy:
def __init__(self, Parent: ConvaiExtension):
    """Create the response proxy and immediately open the gRPC stream on a worker thread.

    Args:
        Parent (ConvaiExtension): Owning extension; receives all stream callbacks.
    """
    self.Parent = Parent
    # Ring buffer of mic audio waiting to be written to the stream
    self.AudioBuffer = deque(maxlen=4096*2)
    self.InformOnDataReceived = False
    self.LastWriteReceived = False
    self.client = None
    self.NumberOfAudioBytesSent = 0
    self.call = None
    self._write_task = None
    self._read_task = None
    # self._main_task = asyncio.ensure_future(self.activate())
    self.activate()
    log("ConvaiGRPCGetResponseProxy constructor")
def activate(self):
    """Validate the inputs (API key, character ID, gRPC channel), create the service stub
    and start the streaming worker thread. Reports problems via Parent.on_failure."""
    # Validate API key
    if (len(self.Parent.APIKey_input_UI.model.get_value_as_string()) == 0):
        self.Parent.on_failure("API key is empty")
        return
    # Validate Character ID
    if (len(self.Parent.CharID_input_UI.model.get_value_as_string()) == 0):
        self.Parent.on_failure("Character ID is empty")
        return
    # Validate Channel
    if self.Parent.channel is None:
        log("grpc - self.Parent.channel is None", 1)
        self.Parent.on_failure("gRPC channel was not created")
        return
    # Create the stub
    self.client = convai_service.ConvaiServiceStub(self.Parent.channel)
    threading.Thread(target=self.init_stream).start()
def init_stream(self):
log("grpc - stream initialized")
try:
for response in self.client.GetResponse(self.create_getGetResponseRequests()):
if response.HasField("audio_response"):
log("gRPC - audio_response: {} {} {}".format(response.audio_response.audio_config, response.audio_response.text_data, response.audio_response.end_of_response))
log("gRPC - session_id: {}".format(response.session_id))
self.Parent.on_session_ID_received(response.session_id)
self.Parent.on_data_received(
response.audio_response.text_data,
response.audio_response.audio_data,
response.audio_response.audio_config.sample_rate_hertz,
response.audio_response.end_of_response)
elif response.HasField("action_response"):
log(f"gRPC - action_response: {response.action_response.action}")
self.Parent.on_actions_received(response.action_response.action)
elif response.HasField("user_query"):
log(f"gRPC - user_query: {response.user_query}")
self.Parent.on_transcription_received(response.user_query.text_data, response.user_query.is_final, response.user_query.end_of_response)
else:
log("Stream Message: {}".format(response))
time.sleep(0.1)
except Exception as e:
if 'response' in locals() and response is not None and response.HasField("audio_response"):
self.Parent.on_failure(f"gRPC - Exception caught in loop: {str(e)} - Stream Message: {response}")
else:
self.Parent.on_failure(f"gRPC - Exception caught in loop: {str(e)}")
traceback.print_exc()
return
self.Parent.on_finish()
def create_initial_GetResponseRequest(self)-> convai_service_msg.GetResponseRequest:
action_config = convai_service_msg.ActionConfig(
classification = 'singlestep',
context_level = 1
)
action_config.actions[:] = self.Parent.parse_actions()
action_config.objects.append(
convai_service_msg.ActionConfig.Object(
name = "dummy",
description = "A dummy object."
)
)
log(f"gRPC - actions parsed: {action_config.actions}")
action_config.characters.append(
convai_service_msg.ActionConfig.Character(
name = "User",
bio = "Person playing the game and asking questions."
)
)
get_response_config = convai_service_msg.GetResponseRequest.GetResponseConfig(
character_id = self.Parent.CharID_input_UI.model.get_value_as_string(),
api_key = self.Parent.APIKey_input_UI.model.get_value_as_string(),
audio_config = convai_service_msg.AudioConfig(
sample_rate_hertz = RATE
),
action_config = action_config
)
if self.Parent.SessionID and self.Parent.SessionID != "":
get_response_config.session_id = self.Parent.SessionID
return convai_service_msg.GetResponseRequest(get_response_config = get_response_config)
def create_getGetResponseRequests(self)-> Generator[convai_service_msg.GetResponseRequest, None, None]:
req = self.create_initial_GetResponseRequest()
yield req
# for i in range(0, 10):
while 1:
IsThisTheFinalWrite = False
GetResponseData = None
if (0): # check if this is a text request
pass
else:
data, IsThisTheFinalWrite = self.consume_from_audio_buffer()
if len(data) == 0 and IsThisTheFinalWrite == False:
time.sleep(0.05)
continue
# Load the audio data to the request
self.NumberOfAudioBytesSent += len(data)
# if len(data):
# log(f"len(data) = {len(data)}")
GetResponseData = convai_service_msg.GetResponseRequest.GetResponseData(audio_data = data)
# Prepare the request
req = convai_service_msg.GetResponseRequest(get_response_data = GetResponseData)
yield req
if IsThisTheFinalWrite:
log(f"gRPC - Done Writing - {self.NumberOfAudioBytesSent} audio bytes sent")
break
time.sleep(0.1)
def write_audio_data_to_send(self, Data: bytes, LastWrite: bool):
self.AudioBuffer.append(Data)
if LastWrite:
self.LastWriteReceived = True
log(f"gRPC LastWriteReceived")
# if self.InformOnDataReceived:
# # Inform of new data to send
# self._write_task = asyncio.ensure_future(self.write_stream())
# # Reset
# self.InformOnDataReceived = False
def finish_writing(self):
self.write_audio_data_to_send(bytes(), True)
def consume_from_audio_buffer(self):
Length = len(self.AudioBuffer)
IsThisTheFinalWrite = False
data = bytes()
if Length:
data = self.AudioBuffer.pop()
# self.AudioBuffer = bytes()
if self.LastWriteReceived and Length == 0:
IsThisTheFinalWrite = True
else:
IsThisTheFinalWrite = False
if IsThisTheFinalWrite:
log(f"gRPC Consuming last mic write")
return data, IsThisTheFinalWrite
def __del__(self):
self.Parent = None
# if self._main_task:
# self._main_task.cancel()
# if self._write_task:
# self._write_task.cancel()
# if self._read_task:
# self._read_task.cancel()
# if self.call:
# self.call.cancel()
log("ConvaiGRPCGetResponseProxy Destructor")
| 23,850 | Python | 36.4427 | 179 | 0.584151 |
Conv-AI/ov_extension/exts/convai/convai/convai_audio_player.py | # from .extension import ConvaiExtension, log
# from test import ConvaiExtension, log
import pyaudio
from pydub import AudioSegment
import io
class ConvaiAudioPlayer:
    """Plays Convai WAV replies through the speakers via a PyAudio callback stream.

    Incoming WAV blobs are decoded with pydub, faded in/out, and appended to a
    single growing AudioSegment that the PyAudio callback drains frame by frame.
    """
    def __init__(self, start_taking_callback, stop_talking_callback):
        # NOTE(review): parameter "start_taking_callback" looks like a typo for
        # "start_talking_callback" - kept as-is since callers may pass it by keyword.
        self.start_talking_callback = start_taking_callback
        self.stop_talking_callback = stop_talking_callback
        # Buffered/playing audio; None until the first chunk arrives.
        self.AudioSegment = None
        self.pa = pyaudio.PyAudio()
        self.pa_stream = None
        self.IsPlaying = False
    def append_to_stream(self, data: bytes):
        """Decode one WAV blob, append its raw frames, and (re)start playback."""
        # 100 ms fades soften the seams between consecutive chunks.
        segment = AudioSegment.from_wav(io.BytesIO(data)).fade_in(100).fade_out(100)
        if self.AudioSegment is None:
            self.AudioSegment = segment
        else:
            # Appends raw PCM through pydub's private _data buffer; assumes all
            # chunks share the same sample rate/width - TODO confirm.
            self.AudioSegment._data += segment._data
        self.play()
    def play(self):
        """Open a callback-driven output stream unless already playing."""
        if self.IsPlaying:
            return
        print("ConvaiAudioPlayer - Started playing")
        self.start_talking_callback()
        # Stream parameters come from the buffered segment's own format.
        self.pa_stream = self.pa.open(
            format=pyaudio.get_format_from_width(self.AudioSegment.sample_width),
            channels=self.AudioSegment.channels,
            rate=self.AudioSegment.frame_rate,
            output=True,
            stream_callback=self.stream_callback
        )
        self.IsPlaying = True
    def pause(self):
        '''
        Pause playing
        '''
        self.IsPlaying = False
    def stop(self):
        '''
        Pause playing and clear audio
        '''
        self.pause()
        self.AudioSegment = None
    def stream_callback(self, in_data, frame_count, time_info, status_flags):
        """PyAudio callback: supply up to frame_count frames, or end the stream."""
        if not self.IsPlaying:
            frames = bytes()
        else:
            frames = self.consume_frames(frame_count)
        # A short read means the buffer ran dry: report completion to PyAudio.
        if self.AudioSegment and len(frames) < frame_count*self.AudioSegment.frame_width:
            print("ConvaiAudioPlayer - Stopped playing")
            self.stop_talking_callback()
            self.IsPlaying = False
            return frames, pyaudio.paComplete
        else:
            return frames, pyaudio.paContinue
    def consume_frames(self, count: int):
        """Remove and return up to `count` whole frames from the buffer.

        Returns b"" when no audio is loaded or when fewer than `count`
        frames remain (the partial tail is left in place).
        """
        if self.AudioSegment is None:
            return bytes()
        FrameEnd = self.AudioSegment.frame_width*count
        if FrameEnd > len(self.AudioSegment._data):
            return bytes()
        FramesToReturn = self.AudioSegment._data[0:FrameEnd]
        if FrameEnd == len(self.AudioSegment._data):
            self.AudioSegment._data = bytes()
        else:
            self.AudioSegment._data = self.AudioSegment._data[FrameEnd:]
        # print("self.AudioSegment._data = self.AudioSegment._data[FrameEnd:]")
        return FramesToReturn
if __name__ == '__main__':
    # Standalone smoke test: record RECORD_SECONDS of mic audio, stream it to
    # the Convai GetResponse service, and play the audio replies back.
    # Requires a convai.env file holding the API key / character id / channel.
    import time
    import pyaudio
    import grpc
    from rpc import service_pb2 as convai_service_msg
    from rpc import service_pb2_grpc as convai_service
    from typing import Generator
    import io
    from pydub import AudioSegment
    import configparser
    CHUNK = 1024
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 16000
    RECORD_SECONDS = 3
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    # Fix: the constructor takes two callbacks (start/stop talking); passing a
    # single None raised TypeError here and calling None would crash playback.
    audio_player = ConvaiAudioPlayer(lambda: None, lambda: None)
    def start_mic():
        """(Re)open the microphone input stream."""
        global stream
        # Fix: `PyAudio.open(...)` referenced an undefined bare name; open()
        # must be called on the PyAudio *instance* `p`.
        stream = p.open(format=FORMAT,
                        channels=CHANNELS,
                        rate=RATE,
                        input=True,
                        frames_per_buffer=CHUNK)
        print("start_mic - Started Recording")
    def stop_mic():
        """Stop and close the microphone input stream, if any."""
        global stream
        if stream:
            stream.stop_stream()
            stream.close()
        else:
            print("stop_mic - could not close mic stream since it is None")
            return
        print("stop_mic - Stopped Recording")
    def getGetResponseRequests(api_key: str, character_id: str, session_id: str = "") -> Generator[convai_service_msg.GetResponseRequest, None, None]:
        """Yield the config request, then RECORD_SECONDS worth of mic audio."""
        action_config = convai_service_msg.ActionConfig(
            classification = 'multistep',
            context_level = 1
        )
        action_config.actions[:] = ["fetch", "jump", "dance", "swim"]
        action_config.objects.append(
            convai_service_msg.ActionConfig.Object(
                name = "ball",
                description = "A round object that can bounce around."
            )
        )
        action_config.objects.append(
            convai_service_msg.ActionConfig.Object(
                name = "water",
                description = "Liquid found in oceans, seas and rivers that you can swim in. You can also drink it."
            )
        )
        action_config.characters.append(
            convai_service_msg.ActionConfig.Character(
                name = "User",
                bio = "Person playing the game and asking questions."
            )
        )
        action_config.characters.append(
            convai_service_msg.ActionConfig.Character(
                name = "Learno",
                bio = "A medieval farmer from a small village."
            )
        )
        get_response_config = convai_service_msg.GetResponseRequest.GetResponseConfig(
            character_id = character_id,
            api_key = api_key,
            audio_config = convai_service_msg.AudioConfig(
                sample_rate_hertz = 16000
            ),
            action_config = action_config
        )
        # Resume an existing conversation when a session id is supplied.
        if session_id != "":
            get_response_config.session_id = session_id
        yield convai_service_msg.GetResponseRequest(
            get_response_config = get_response_config
        )
        for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
            data = stream.read(CHUNK)
            yield convai_service_msg.GetResponseRequest(
                get_response_data = convai_service_msg.GetResponseRequest.GetResponseData(
                    audio_data = data
                )
            )
        stream.stop_stream()
        stream.close()
        print("* recording stopped")
    config = configparser.ConfigParser()
    # Fix: raw string so the backslashes cannot be read as escape sequences.
    config.read(r"exts\convai\convai\convai.env")
    api_key = config.get("CONVAI", "API_KEY")
    character_id = config.get("CONVAI", "CHARACTER_ID")
    channel_address = config.get("CONVAI", "CHANNEL")
    channel = grpc.secure_channel(channel_address, grpc.ssl_channel_credentials())
    client = convai_service.ConvaiServiceStub(channel)
    for response in client.GetResponse(getGetResponseRequests(api_key, character_id)):
        if response.HasField("audio_response"):
            print("Stream Message: {} {} {}".format(response.session_id, response.audio_response.audio_config, response.audio_response.text_data))
            audio_player.append_to_stream(response.audio_response.audio_data)
        else:
            print("Stream Message: {}".format(response))
    p.terminate()
    # Keep the process alive long enough for queued playback to finish.
    time.sleep(10)
Conv-AI/ov_extension/exts/convai/convai/rpc/service_pb2_grpc.py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import service_pb2 as service__pb2
class ConvaiServiceStub(object):
    """Missing associated documentation comment in .proto file."""
    # Client-side stub for service.ConvaiService. Auto-generated by the gRPC
    # Python protocol compiler plugin - regenerate from the .proto file
    # instead of editing by hand.
    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # Unary request / unary response.
        self.Hello = channel.unary_unary(
                '/service.ConvaiService/Hello',
                request_serializer=service__pb2.HelloRequest.SerializeToString,
                response_deserializer=service__pb2.HelloResponse.FromString,
                )
        # Bidirectional streaming.
        self.HelloStream = channel.stream_stream(
                '/service.ConvaiService/HelloStream',
                request_serializer=service__pb2.HelloRequest.SerializeToString,
                response_deserializer=service__pb2.HelloResponse.FromString,
                )
        # Bidirectional streaming.
        self.SpeechToText = channel.stream_stream(
                '/service.ConvaiService/SpeechToText',
                request_serializer=service__pb2.STTRequest.SerializeToString,
                response_deserializer=service__pb2.STTResponse.FromString,
                )
        # Bidirectional streaming.
        self.GetResponse = channel.stream_stream(
                '/service.ConvaiService/GetResponse',
                request_serializer=service__pb2.GetResponseRequest.SerializeToString,
                response_deserializer=service__pb2.GetResponseResponse.FromString,
                )
        # Single request / streamed responses.
        self.GetResponseSingle = channel.unary_stream(
                '/service.ConvaiService/GetResponseSingle',
                request_serializer=service__pb2.GetResponseRequestSingle.SerializeToString,
                response_deserializer=service__pb2.GetResponseResponse.FromString,
                )
class ConvaiServiceServicer(object):
    """Missing associated documentation comment in .proto file."""
    # Server-side base class: subclass, override the methods below, and
    # register with add_ConvaiServiceServicer_to_server(). Each default
    # implementation rejects the call with UNIMPLEMENTED. Auto-generated -
    # do not edit by hand.
    def Hello(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def HelloStream(self, request_iterator, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def SpeechToText(self, request_iterator, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def GetResponse(self, request_iterator, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def GetResponseSingle(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ConvaiServiceServicer_to_server(servicer, server):
    # Registers the servicer's handlers for every ConvaiService RPC on the
    # given grpc.Server. Auto-generated - do not edit by hand.
    rpc_method_handlers = {
            'Hello': grpc.unary_unary_rpc_method_handler(
                    servicer.Hello,
                    request_deserializer=service__pb2.HelloRequest.FromString,
                    response_serializer=service__pb2.HelloResponse.SerializeToString,
            ),
            'HelloStream': grpc.stream_stream_rpc_method_handler(
                    servicer.HelloStream,
                    request_deserializer=service__pb2.HelloRequest.FromString,
                    response_serializer=service__pb2.HelloResponse.SerializeToString,
            ),
            'SpeechToText': grpc.stream_stream_rpc_method_handler(
                    servicer.SpeechToText,
                    request_deserializer=service__pb2.STTRequest.FromString,
                    response_serializer=service__pb2.STTResponse.SerializeToString,
            ),
            'GetResponse': grpc.stream_stream_rpc_method_handler(
                    servicer.GetResponse,
                    request_deserializer=service__pb2.GetResponseRequest.FromString,
                    response_serializer=service__pb2.GetResponseResponse.SerializeToString,
            ),
            'GetResponseSingle': grpc.unary_stream_rpc_method_handler(
                    servicer.GetResponseSingle,
                    request_deserializer=service__pb2.GetResponseRequestSingle.FromString,
                    response_serializer=service__pb2.GetResponseResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'service.ConvaiService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ConvaiService(object):
    """Missing associated documentation comment in .proto file."""
    # Static per-RPC convenience wrappers around the channel-less
    # grpc.experimental.* invocation API (EXPERIMENTAL). Auto-generated -
    # do not edit by hand.
    @staticmethod
    def Hello(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/service.ConvaiService/Hello',
            service__pb2.HelloRequest.SerializeToString,
            service__pb2.HelloResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def HelloStream(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.stream_stream(request_iterator, target, '/service.ConvaiService/HelloStream',
            service__pb2.HelloRequest.SerializeToString,
            service__pb2.HelloResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def SpeechToText(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.stream_stream(request_iterator, target, '/service.ConvaiService/SpeechToText',
            service__pb2.STTRequest.SerializeToString,
            service__pb2.STTResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetResponse(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.stream_stream(request_iterator, target, '/service.ConvaiService/GetResponse',
            service__pb2.GetResponseRequest.SerializeToString,
            service__pb2.GetResponseResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetResponseSingle(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_stream(request, target, '/service.ConvaiService/GetResponseSingle',
            service__pb2.GetResponseRequestSingle.SerializeToString,
            service__pb2.GetResponseResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 8,631 | Python | 42.376884 | 111 | 0.636543 |
Steigner/Isaac-ur_rtde/isaac_rtde.py | # MIT License
# Copyright (c) 2023 Fravebot
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Author: Martin Juříček
# Isaac Sim app library
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
# Isaac Sim extenstions + core libraries
from omni.isaac.motion_generation.lula import RmpFlow
from omni.isaac.motion_generation import ArticulationMotionPolicy
from omni.isaac.core.robots import Robot
from omni.isaac.core.objects import cuboid
from omni.isaac.core import World
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.motion_generation.interface_config_loader import (
load_supported_motion_policy_config,
)
# ur rtde communication
import rtde_control
import rtde_receive
import numpy as np
import argparse
import sys
# Tele-operation bridge: an RMPflow-driven UR5e in Isaac Sim chases a draggable
# target cube; every frame the resulting joint state is servoed to a real (or
# URSim) controller over ur_rtde, then the sim is re-synced from the robot.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--robot-ip",
    type=str,
    default="127.0.0.1",
    help="IP adress of robot Real world UR Polyscope or VM UR Polyscope",
)
arg = parser.parse_args()
# set up paths and prims
robot_name = "UR5e"
prim_path = "/UR5e"
usd_path = get_assets_root_path() + "/Isaac/Robots/UniversalRobots/ur5e/ur5e.usd"
# set references to stage in isaac
add_reference_to_stage(usd_path=usd_path, prim_path=prim_path)
# add world
my_world = World(stage_units_in_meters=1.0)
my_world.scene.add_default_ground_plane()
# add robot to world
robot = my_world.scene.add(Robot(prim_path=prim_path, name=robot_name))
# The load_supported_motion_policy_config() function is currently the simplest way to load supported robots.
# In the future, Isaac Sim will provide a centralized registry of robots with Lula robot description files
# and RMP configuration files stored alongside the robot USD.
rmp_config = load_supported_motion_policy_config(robot_name, "RMPflow")
# Initialize an RmpFlow object and set up
rmpflow = RmpFlow(**rmp_config)
physics_dt = 1.0 / 60
articulation_rmpflow = ArticulationMotionPolicy(robot, rmpflow, physics_dt)
articulation_controller = robot.get_articulation_controller()
# Make a target to follow
target_cube = cuboid.VisualCuboid(
    "/World/target", position=np.array([0.5, 0, 0.5]), color=np.array([1.0, 0, 0]), size=0.1, scale=np.array([0.5, 0.5, 0.5])
)
# Make an obstacle to avoid
ground = cuboid.VisualCuboid(
    "/World/ground", position=np.array([0.0, 0, -0.0525]), color=np.array([0, 1.0, 0]), size=0.1, scale=np.array([40, 40, 1])
)
rmpflow.add_obstacle(ground)
# prereset world
my_world.reset()
# Connect to the real/virtual UR controller and seed the sim with its pose.
try:
    rtde_r = rtde_receive.RTDEReceiveInterface(arg.robot_ip)
    rtde_c = rtde_control.RTDEControlInterface(arg.robot_ip)
    robot.set_joint_positions(np.array(rtde_r.getActualQ()))
except Exception as exc:
    # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt /
    # SystemExit and hid the actual connection error.
    print(f"[ERROR] Robot is not connected: {exc}")
    # close isaac sim
    simulation_app.close()
    sys.exit()
# servoJ parameters are constant - hoisted out of the render loop.
VELOCITY = 0.1
ACCELERATION = 0.1
SERVO_DT = 1.0 / 500  # 2ms
LOOKAHEAD_TIME = 0.1
GAIN = 300
while simulation_app.is_running():
    # on step render
    my_world.step(render=True)
    if my_world.is_playing():
        # first frame -> reset world
        if my_world.current_time_step_index == 0:
            my_world.reset()
        # set target to RMP Flow
        rmpflow.set_end_effector_target(
            target_position=target_cube.get_world_pose()[0], target_orientation=target_cube.get_world_pose()[1]
        )
        # stream the current sim joint positions to the robot
        joint_q = robot.get_joint_positions()
        # time start period
        t_start = rtde_c.initPeriod()
        # run servoJ
        rtde_c.servoJ(joint_q, VELOCITY, ACCELERATION, SERVO_DT, LOOKAHEAD_TIME, GAIN)
        rtde_c.waitPeriod(t_start)
        # Query the current obstacle position
        rmpflow.update_world()
        actions = articulation_rmpflow.get_next_articulation_action()
        articulation_controller.apply_action(actions)
        # get actual q from robot and update isaac model
        robot.set_joint_positions(np.array(rtde_r.getActualQ()))
# rtde control stop script and disconnect
rtde_c.servoStop()
rtde_c.stopScript()
rtde_r.disconnect()
# close isaac sim
simulation_app.close()
HC2ER/OmniverseExtension-hnadi.tools.exploded_view/hnadi/tools/exploded_view/exploded_view.py | from .utils import get_name_from_path, get_pure_list
import omni.usd
import omni.kit.commands
from pxr import Usd, Sdf, Gf
# ----------------------------------------------------SELECT-------------------------------------------------------------
def select_explode_Xform(x_coord, y_coord, z_coord, x_ratio, y_ratio, z_ratio):
    """Build an /World/Exploded_Model group from the current selection.

    Copies either one selected group (case A) or several separately selected
    prims (case B) into a new Exploded_Model Xform, caches its children and
    their translations in module-level globals, creates an Explosion_Centre
    Xform at the group pivot, and resets the given UI fields (coords to the
    pivot, ratios to 0). No-op when nothing is selected.
    """
    global original_path
    global current_model_path
    global item_count
    global default_pivot
    global item_list0
    global translate_list0
    # Get current stage and active prim_paths
    stage = omni.usd.get_context().get_stage()
    selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
    if not selected_prim_path:
        return
    # A: If the whole group is selected
    if len(selected_prim_path) == 1:
        # Test members
        selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
        group_prim = stage.GetPrimAtPath(selected_prim_path[0])
        children_prims_list = group_prim.GetChildren()
        # If no members
        if len(children_prims_list) <= 1:
            print("Please select a valid group or all items at once!")
            return
        else:
            original_path = selected_prim_path
            item_count = len(children_prims_list)
            omni.kit.commands.execute('CopyPrim',
                path_from= selected_prim_path[0],
                path_to='/World/Exploded_Model',
                exclusive_select=False)
            selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
            # print(selected_prim_path[-1])
            # sub_group_prim = stage.GetPrimAtPath(selected_prim_path[0])
            # sub_children_prims_list = group_prim.GetChildren()
            # original_path = selected_prim_path
            # item_count = len(selected_prim_path)
            # for i in sub_children_prim_list:
            #     name = get_name_from_path(i)
            #     name_list.append(name)
            # Select only the freshly made copy.
            omni.kit.commands.execute('SelectPrims',
                old_selected_paths=selected_prim_path,
                new_selected_paths=[selected_prim_path[-1]],
                expand_in_stage=True)
    # B: If multiple prims are selected separately
    else:
        original_path = selected_prim_path
        item_count = len(selected_prim_path)
        name_list = []
        group_list = []
        # Copy each prim to a temporary item_NN path (zero-padded below 10).
        for i in selected_prim_path:
            name = get_name_from_path(i)
            name_list.append(name)
            # Copy
            omni.kit.commands.execute('CopyPrim',
                path_from = i,
                path_to ='/World/item_01',
                exclusive_select=False)
            if selected_prim_path.index(i)<= 8:
                group_list.append(f'/World/item_0{selected_prim_path.index(i)+1}')
            else:
                group_list.append(f'/World/item_{selected_prim_path.index(i)+1}')
        # Group
        omni.kit.commands.execute('GroupPrims',
            prim_paths=group_list)
        # Change group name
        selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
        omni.kit.commands.execute('MovePrims',
            paths_to_move={selected_prim_path[0]: '/World/Exploded_Model'})
        # obj = stage.GetObjectAtPath(selected_prim_path[0])
        # default_pivot = obj.GetAttribute('xformOp:translate:pivot').Get()
        # Change members names back
        selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
        group_prim = stage.GetPrimAtPath(selected_prim_path[0])
        children_prims_list = group_prim.GetChildren()
        # Move members out of the group
        for i in children_prims_list:
            ind = children_prims_list.index(i)
            if ind <= 8:
                omni.kit.commands.execute('MovePrims',
                    paths_to_move={f"{selected_prim_path[0]}/item_0{ind+1}": f"{selected_prim_path[0]}/" + name_list[ind]})
            else:
                omni.kit.commands.execute('MovePrims',
                    paths_to_move={f"{selected_prim_path[0]}/item_{ind+1}": f"{selected_prim_path[0]}/" + name_list[ind]})
    # Choose Exploded_Model and get current path,count,pivot
    selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
    current_model_path = selected_prim_path
    obj = stage.GetObjectAtPath(selected_prim_path[0])
    default_pivot = obj.GetAttribute('xformOp:translate:pivot').Get()
    print(obj)
    print(default_pivot)
    # Get origin translate_list
    outer_group_prim = stage.GetPrimAtPath(current_model_path[0])
    children_prims_list = outer_group_prim.GetChildren()
    item_list0 = children_prims_list
    translate_list0 = []
    for i in children_prims_list:
        sub_children_prim_list = i.GetChildren()
        # Leaf prims use their translate; nested groups use their pivot.
        if len(sub_children_prim_list) <= 1:
            translate = i.GetAttribute('xformOp:translate').Get()
        else:
            translate = i.GetAttribute('xformOp:translate:pivot').Get()
        translate_list0.append(translate)
    # print("--------------------------------------------")
    # print(original_path)
    # print(current_model_path)
    # print(item_count)
    # print(default_pivot)
    # print(item_list0)
    # print(translate_list0)
    # print("--------------------------------------------")
    # Create Explosion_Centre
    omni.kit.commands.execute('CreatePrimWithDefaultXform',
        prim_type='Xform',
        attributes={})
    selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
    world_pivot_path = selected_prim_path
    # Move the new Xform onto the model pivot.
    omni.kit.commands.execute('ChangeProperty',
        prop_path=Sdf.Path(f"{world_pivot_path[0]}" + ".xformOp:translate"),
        value=Gf.Vec3d(default_pivot[0], default_pivot[1], default_pivot[2]),
        prev=Gf.Vec3d(0, 0, 0))
    obj1 = stage.GetObjectAtPath(selected_prim_path[0])
    default_pivot = obj1.GetAttribute('xformOp:translate').Get()
    omni.kit.commands.execute('MovePrims',
        paths_to_move={f"{world_pivot_path[0]}": f"{current_model_path[0]}/" + "Explosion_Centre"})
    # Set_default_button_value
    x_coord.model.set_value(default_pivot[0])
    y_coord.model.set_value(default_pivot[1])
    z_coord.model.set_value(default_pivot[2])
    x_ratio.model.set_value(0)
    y_ratio.model.set_value(0)
    z_ratio.model.set_value(0)
    # End
    omni.kit.commands.execute('SelectPrims',
        old_selected_paths=[],
        new_selected_paths=[f'{current_model_path[0]}'],
        expand_in_stage=True)
    return
#------------------------------------------------------REMOVE-----------------------------------------------------------
def remove_item(x_coord, y_coord, z_coord, x_ratio, y_ratio, z_ratio):
    """Remove the selected prim(s) from the current Exploded_Model.

    Valid selected items are moved back under /World; the cached globals
    (item list, translations, item count) and the Explosion_Centre pivot are
    then refreshed. The ratio sliders are zeroed while items move so the
    rest positions are recorded correctly, then restored afterwards.
    """
    try:
        global original_path
        global current_model_path
        global item_count
        global default_pivot
        global item_list0
        global translate_list0
        stage = omni.usd.get_context().get_stage()
        selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
        if not selected_prim_path:
            return
        # Remove correct items
        for i in selected_prim_path:
            path = str(i)
            name = get_name_from_path(path)
            if path != f"{current_model_path[0]}/" + "Explosion_Centre":
                # Only children of the current model (not the model itself) qualify.
                if path != current_model_path[0] and path.find(current_model_path[0]) != -1:
                    omni.kit.commands.execute('SelectPrims',
                        old_selected_paths=[],
                        new_selected_paths=[f'{current_model_path[0]}'],
                        expand_in_stage=True)
                    selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
                    obj0 = stage.GetObjectAtPath(selected_prim_path[0])
                    children_prims_list_z0 = obj0.GetChildren()
                    new_count = len(children_prims_list_z0)
                    # Two children = one item + Explosion_Centre: nothing removable.
                    if new_count == 2:
                        print("Cannot remove the only item in Exploded_Model!")
                        return
                    else:
                        # Restore values to 0 to record the positions
                        x = x_ratio.model.get_value_as_float()
                        y = y_ratio.model.get_value_as_float()
                        z = z_ratio.model.get_value_as_float()
                        # If 0, pass
                        if x == 0.0 and y== 0.0 and z == 0.0:
                            pass
                        # If not, set 0
                        else:
                            x_ratio.model.set_value(0.0)
                            y_ratio.model.set_value(0.0)
                            z_ratio.model.set_value(0.0)
                        omni.kit.commands.execute('MovePrim',
                            path_from = path,
                            path_to = "World/" + name)
                else:
                    omni.kit.commands.execute('SelectPrims',
                        old_selected_paths=[],
                        new_selected_paths=[f'{current_model_path[0]}'],
                        expand_in_stage=True)
                    print("Please select a valid item to remove!")
            else:
                omni.kit.commands.execute('SelectPrims',
                    old_selected_paths=[],
                    new_selected_paths=[f'{current_model_path[0]}'],
                    expand_in_stage=True)
                print("Cannot remove Explosion_Centre!")
        omni.kit.commands.execute('SelectPrims',
            old_selected_paths=[],
            new_selected_paths=[f'{current_model_path[0]}'],
            expand_in_stage=True)
        selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
        obj = stage.GetObjectAtPath(selected_prim_path[0])
        children_prims_list_z = obj.GetChildren()
        # Exclude Explosion_Centre from the item count.
        new_count2 = len(children_prims_list_z) - 1
        # If any item is removed
        if new_count2 < item_count:
            # Refresh item_count
            item_count = new_count2
            obj = stage.GetObjectAtPath(selected_prim_path[0])
            # Refresh item_list0 and translate_list0
            outer_group_prim = stage.GetPrimAtPath(current_model_path[0])
            children_prims_list0 = outer_group_prim.GetChildren()
            children_prims_list = get_pure_list(children_prims_list0)
            item_list0 = children_prims_list
            translate_list0 = []
            for i in children_prims_list:
                sub_children_prim_list = i.GetChildren()
                # Leaf prims use their translate; nested groups use their pivot.
                if len(sub_children_prim_list) <= 1:
                    translate = i.GetAttribute('xformOp:translate').Get()
                else:
                    translate = i.GetAttribute('xformOp:translate:pivot').Get()
                translate_list0.append(translate)
            # Refresh pivot
            group_list = []
            name_list = []
            for i in item_list0:
                item_path = str(i.GetPath())
                name = get_name_from_path(item_path)
                name_list.append(name)
                group_list.append(item_path)
            # S1 group
            omni.kit.commands.execute('GroupPrims',
                prim_paths=group_list)
            # change group name
            selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
            omni.kit.commands.execute('MovePrims',
                paths_to_move={selected_prim_path[0]: f"{current_model_path[0]}/Sub_Exploded_Model"})
            # S2 Get new pivot by group
            omni.kit.commands.execute('SelectPrims',
                old_selected_paths=[],
                new_selected_paths=[f"{current_model_path[0]}/Sub_Exploded_Model"],
                expand_in_stage=True)
            selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
            obj = stage.GetObjectAtPath(selected_prim_path[0])
            default_pivot = obj.GetAttribute('xformOp:translate:pivot').Get()
            # S3 Move members out of the group
            group_path = selected_prim_path
            outer_group_prim = stage.GetPrimAtPath(group_path[0])
            children_prims_list = outer_group_prim.GetChildren()
            for i in children_prims_list:
                index = children_prims_list.index(i)
                name = name_list[index]
                omni.kit.commands.execute('MovePrim',
                    path_from = f"{group_path[0]}/{name_list[index]}",
                    path_to = f"{current_model_path[0]}/{name_list[index]}")
            # S4 Delete the group
            omni.kit.commands.execute('DeletePrims',
                paths=[f"{current_model_path[0]}/Sub_Exploded_Model"])
            # S5 Change pivot
            omni.kit.commands.execute('ChangeProperty',
                prop_path=Sdf.Path(f"{current_model_path[0]}/Explosion_Centre" + ".xformOp:translate"),
                value=Gf.Vec3d(default_pivot[0], default_pivot[1], default_pivot[2]),
                prev=Gf.Vec3d(0, 0, 0))
            # Restore_default_panel
            x_coord.model.set_value(default_pivot[0])
            y_coord.model.set_value(default_pivot[1])
            z_coord.model.set_value(default_pivot[2])
            if x == 0.0 and y== 0.0 and z == 0.0:
                pass
            else:
                x_ratio.model.set_value(x)
                y_ratio.model.set_value(y)
                z_ratio.model.set_value(z)
            # Select
            omni.kit.commands.execute('SelectPrims',
                old_selected_paths=[],
                new_selected_paths=[f'{current_model_path[0]}'],
                expand_in_stage=True)
            return
        # If no remove actions, return
        else:
            omni.kit.commands.execute('SelectPrims',
                old_selected_paths=[],
                new_selected_paths=[f'{current_model_path[0]}'],
                expand_in_stage=True)
            return
    except Exception:
        # Fix: was a bare `except:`, which also trapped KeyboardInterrupt /
        # SystemExit. Reaching here normally means the module-level globals
        # were never initialised, i.e. no model has been selected yet.
        print("Create a model to explode at first!")
        return
#---------------------------------------------------------ADD-----------------------------------------------------------
def add_item(x_coord, y_coord, z_coord, x_ratio, y_ratio, z_ratio):
    """Add the currently selected prims to the exploded model.

    Prims selected outside the model are moved under it; the model's
    explosion pivot and the cached item/translate lists are then rebuilt
    by temporarily grouping all members (the group's auto-computed pivot
    becomes the new Explosion_Centre position).

    Args:
        x_coord/y_coord/z_coord: ui widgets holding the pivot coordinates.
        x_ratio/y_ratio/z_ratio: ui widgets holding the explosion ratios.
    """
    try:
        global original_path
        global current_model_path
        global item_count
        global default_pivot
        global item_list0
        global translate_list0
        stage = omni.usd.get_context().get_stage()
        selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
        if not selected_prim_path:
            return
        # Read the ratios ONCE, before moving anything, so the original
        # values can be restored at the end. (Previously they were re-read
        # inside the loop after being zeroed, so moving more than one item
        # "restored" 0.0; and they were never assigned at all when every
        # selected item was already inside the model, raising NameError.)
        x = x_ratio.model.get_value_as_float()
        y = y_ratio.model.get_value_as_float()
        z = z_ratio.model.get_value_as_float()
        # Move every selected prim that is not already inside the model.
        for i in selected_prim_path:
            path = str(i)
            if path.find(current_model_path[0]) == -1:
                # Zero the ratios so the recorded translations are unexploded.
                if not (x == 0.0 and y == 0.0 and z == 0.0):
                    x_ratio.model.set_value(0.0)
                    y_ratio.model.set_value(0.0)
                    z_ratio.model.set_value(0.0)
                omni.kit.commands.execute('MovePrim',
                    path_from=path,
                    path_to=f"{current_model_path[0]}/" + get_name_from_path(path))
            else:
                omni.kit.commands.execute('SelectPrims',
                    old_selected_paths=[],
                    new_selected_paths=[f'{current_model_path[0]}'],
                    expand_in_stage=True)
                print("The selected item already existed in the model!")
        omni.kit.commands.execute('SelectPrims',
            old_selected_paths=[],
            new_selected_paths=[f'{current_model_path[0]}'],
            expand_in_stage=True)
        selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
        obj = stage.GetObjectAtPath(selected_prim_path[0])
        # Child count minus the Explosion_Centre marker prim.
        new_count2 = len(obj.GetChildren()) - 1
        if new_count2 <= item_count:
            # Nothing was added: just reselect the model and bail out.
            omni.kit.commands.execute('SelectPrims',
                old_selected_paths=[],
                new_selected_paths=[f'{current_model_path[0]}'],
                expand_in_stage=True)
            return
        # Something was added: refresh the caches.
        item_count = new_count2
        outer_group_prim = stage.GetPrimAtPath(current_model_path[0])
        children_prims_list = get_pure_list(outer_group_prim.GetChildren())
        item_list0 = children_prims_list
        translate_list0 = []
        for child in children_prims_list:
            if len(child.GetChildren()) <= 1:
                # Leaf item: cache its plain translation.
                translate_list0.append(child.GetAttribute('xformOp:translate').Get())
            else:
                # Sub-group: cache its pivot instead.
                translate_list0.append(child.GetAttribute('xformOp:translate:pivot').Get())
        # Recompute the model pivot: group all items (S1), read the group's
        # auto pivot (S2), move the members back out (S3), delete the
        # temporary group (S4) and write the pivot to Explosion_Centre (S5).
        group_list = []
        name_list = []
        for child in item_list0:
            item_path = str(child.GetPath())
            group_list.append(item_path)
            name_list.append(get_name_from_path(item_path))
        # S1 Group
        omni.kit.commands.execute('GroupPrims',
            prim_paths=group_list)
        selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
        omni.kit.commands.execute('MovePrims',
            paths_to_move={selected_prim_path[0]: f"{current_model_path[0]}/Sub_Exploded_Model"})
        # S2 Get new pivot from the group
        omni.kit.commands.execute('SelectPrims',
            old_selected_paths=[],
            new_selected_paths=[f"{current_model_path[0]}/Sub_Exploded_Model"],
            expand_in_stage=True)
        selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
        obj = stage.GetObjectAtPath(selected_prim_path[0])
        default_pivot = obj.GetAttribute('xformOp:translate:pivot').Get()
        group_path = selected_prim_path
        # S3 Move members out of the group
        for name in name_list:
            omni.kit.commands.execute('MovePrim',
                path_from=f"{group_path[0]}/{name}",
                path_to=f"{current_model_path[0]}/{name}")
        # S4 Delete the group
        omni.kit.commands.execute('DeletePrims',
            paths=[f"{current_model_path[0]}/Sub_Exploded_Model"])
        # S5 Write the pivot to the Explosion_Centre prim
        omni.kit.commands.execute('ChangeProperty',
            prop_path=Sdf.Path(f"{current_model_path[0]}/Explosion_Centre" + ".xformOp:translate"),
            value=Gf.Vec3d(default_pivot[0], default_pivot[1], default_pivot[2]),
            prev=Gf.Vec3d(0, 0, 0))
        # Restore the panel: pivot coordinates and the original ratios.
        x_coord.model.set_value(default_pivot[0])
        y_coord.model.set_value(default_pivot[1])
        z_coord.model.set_value(default_pivot[2])
        if not (x == 0.0 and y == 0.0 and z == 0.0):
            x_ratio.model.set_value(x)
            y_ratio.model.set_value(y)
            z_ratio.model.set_value(z)
        omni.kit.commands.execute('SelectPrims',
            old_selected_paths=[],
            new_selected_paths=[f'{current_model_path[0]}'],
            expand_in_stage=True)
        return
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt propagates.
        print("Create a model to explode at first!")
        return
#--------------------------------------------------------BIND-----------------------------------------------------------
def bind_item(x_coord, y_coord, z_coord, x_ratio, y_ratio, z_ratio):
    """Group (bind) the selected items of the exploded model into one unit.

    At least two prims inside the model must be selected; the
    Explosion_Centre marker is silently skipped and prims outside the
    model are rejected with a message. On success the cached
    item/translate lists and the panel values are refreshed.
    """
    try:
        global original_path
        global current_model_path
        global item_count
        global default_pivot
        global item_list0
        global translate_list0
        stage = omni.usd.get_context().get_stage()
        selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
        if not selected_prim_path:
            return
        if len(selected_prim_path) < 2:
            omni.kit.commands.execute('SelectPrims',
                old_selected_paths=[],
                new_selected_paths=[f'{current_model_path[0]}'],
                expand_in_stage=True)
            print("Bind at least 2 items in the model!")
            return
        # Collect bindable paths: inside the model, not the centre marker.
        group_list = []
        centre_path = f"{current_model_path[0]}/" + "Explosion_Centre"
        for i in selected_prim_path:
            path = str(i)
            if path == centre_path:
                continue  # silently skip the marker, as before
            if path.find(current_model_path[0]) != -1:
                group_list.append(i)
            else:
                omni.kit.commands.execute('SelectPrims',
                    old_selected_paths=[],
                    new_selected_paths=[f'{current_model_path[0]}'],
                    expand_in_stage=True)
                # Fixed message: this branch handles prims OUTSIDE the
                # model (the old text wrongly mentioned Explosion_Centre).
                print("Cannot bind items outside the model!")
        # Remember the ratios, then zero them so the bind records
        # unexploded positions.
        x = x_ratio.model.get_value_as_float()
        y = y_ratio.model.get_value_as_float()
        z = z_ratio.model.get_value_as_float()
        if not (x == 0.0 and y == 0.0 and z == 0.0):
            x_ratio.model.set_value(0.0)
            y_ratio.model.set_value(0.0)
            z_ratio.model.set_value(0.0)
        # Bind items
        omni.kit.commands.execute('GroupPrims',
            prim_paths=group_list)
        omni.kit.commands.execute('SelectPrims',
            old_selected_paths=[],
            new_selected_paths=[f'{current_model_path[0]}'],
            expand_in_stage=True)
        selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
        obj = stage.GetObjectAtPath(selected_prim_path[0])
        # Child count minus the Explosion_Centre marker prim.
        new_count2 = len(obj.GetChildren()) - 1
        if new_count2 < item_count:
            # Something was bound: refresh the caches.
            item_count = new_count2
            outer_group_prim = stage.GetPrimAtPath(current_model_path[0])
            children_prims_list = get_pure_list(outer_group_prim.GetChildren())
            item_list0 = children_prims_list
            translate_list0 = []
            for child in children_prims_list:
                if len(child.GetChildren()) <= 1:
                    translate_list0.append(child.GetAttribute('xformOp:translate').Get())
                else:
                    translate_list0.append(child.GetAttribute('xformOp:translate:pivot').Get())
            # default_pivot is unchanged by binding (the old
            # ``default_pivot = default_pivot`` self-assignment was a no-op
            # and has been removed).
            x_coord.model.set_value(default_pivot[0])
            y_coord.model.set_value(default_pivot[1])
            z_coord.model.set_value(default_pivot[2])
            if not (x == 0.0 and y == 0.0 and z == 0.0):
                x_ratio.model.set_value(x)
                y_ratio.model.set_value(y)
                z_ratio.model.set_value(z)
            omni.kit.commands.execute('SelectPrims',
                old_selected_paths=[],
                new_selected_paths=[f'{current_model_path[0]}'],
                expand_in_stage=True)
            return
        else:
            # Nothing was bound: just reselect the model.
            omni.kit.commands.execute('SelectPrims',
                old_selected_paths=[],
                new_selected_paths=[f'{current_model_path[0]}'],
                expand_in_stage=True)
            return
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt propagates.
        print("Create a model to explode at first!")
        return
#------------------------------------------------------UNBIND-----------------------------------------------------------
def unbind_item(x_coord, y_coord, z_coord, x_ratio, y_ratio, z_ratio):
    """Dissolve (unbind) the selected group(s) inside the exploded model.

    Each selected prim must be a group under the model; its children are
    moved back to the model root and the emptied group is deleted. The
    cached item/translate lists are then refreshed.
    """
    try:
        global original_path
        global current_model_path
        global item_count
        global default_pivot
        global item_list0
        global translate_list0
        stage = omni.usd.get_context().get_stage()
        selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
        if not selected_prim_path:
            return
        for index, i in enumerate(selected_prim_path):
            path0 = str(i)
            # Valid candidates live under the model but are not the model itself.
            if path0 != current_model_path[0] and path0.find(current_model_path[0]) != -1:
                outer_group_prim = stage.GetPrimAtPath(path0)
                children_prims_list0 = outer_group_prim.GetChildren()
                if len(children_prims_list0) < 1:
                    # A leaf prim (no children) is not a group.
                    omni.kit.commands.execute('SelectPrims',
                        old_selected_paths=[],
                        new_selected_paths=[f'{current_model_path[0]}'],
                        expand_in_stage=True)
                    print("Please select a valid group!")
                    return
                else:
                    if index == 0:
                        # Zero the ratios once, remembering the current
                        # values so they can be restored afterwards.
                        x = x_ratio.model.get_value_as_float()
                        y = y_ratio.model.get_value_as_float()
                        z = z_ratio.model.get_value_as_float()
                        if not (x == 0.0 and y == 0.0 and z == 0.0):
                            x_ratio.model.set_value(0.0)
                            y_ratio.model.set_value(0.0)
                            z_ratio.model.set_value(0.0)
                    # Move the group members back to the model root ...
                    for j in children_prims_list0:
                        path = str(j.GetPath())
                        name = get_name_from_path(path)
                        omni.kit.commands.execute('MovePrims',
                            paths_to_move={path: f"{current_model_path[0]}/{name}"})
                    # ... then delete the now-empty group.
                    omni.kit.commands.execute('DeletePrims',
                        paths=[path0])
            else:
                omni.kit.commands.execute('SelectPrims',
                    old_selected_paths=[],
                    new_selected_paths=[f'{current_model_path[0]}'],
                    expand_in_stage=True)
                print("Please unbind a valid group!")
                return
        omni.kit.commands.execute('SelectPrims',
            old_selected_paths=[],
            new_selected_paths=[f'{current_model_path[0]}'],
            expand_in_stage=True)
        selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
        obj = stage.GetObjectAtPath(selected_prim_path[0])
        # Child count minus the Explosion_Centre marker prim.
        new_count2 = len(obj.GetChildren()) - 1
        if new_count2 >= item_count:
            # Something was unbound: refresh the caches.
            item_count = new_count2
            outer_group_prim = stage.GetPrimAtPath(current_model_path[0])
            children_prims_list = get_pure_list(outer_group_prim.GetChildren())
            item_list0 = children_prims_list
            translate_list0 = []
            for child in children_prims_list:
                if len(child.GetChildren()) <= 1:
                    translate_list0.append(child.GetAttribute('xformOp:translate').Get())
                else:
                    translate_list0.append(child.GetAttribute('xformOp:translate:pivot').Get())
            # default_pivot is unchanged by unbinding.
            x_coord.model.set_value(default_pivot[0])
            y_coord.model.set_value(default_pivot[1])
            z_coord.model.set_value(default_pivot[2])
            if not (x == 0.0 and y == 0.0 and z == 0.0):
                x_ratio.model.set_value(x)
                y_ratio.model.set_value(y)
                z_ratio.model.set_value(z)
            omni.kit.commands.execute('SelectPrims',
                old_selected_paths=[],
                new_selected_paths=[f'{current_model_path[0]}'],
                expand_in_stage=True)
            return
        else:
            # Nothing was unbound: just reselect the model.
            omni.kit.commands.execute('SelectPrims',
                old_selected_paths=[],
                new_selected_paths=[f'{current_model_path[0]}'],
                expand_in_stage=True)
            return
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt propagates.
        print("Create a model to explode at first!")
        return
#------------------------------------------------------ONCHANGE----------------------------------------------------------
def on_pivot_change(x_coord, y_coord, z_coord, x_button, y_button, z_button, a: float):
    """UI callback: re-explode the model after a pivot coordinate changes.

    Moves Explosion_Centre to the panel coordinates, then offsets every
    item away from the pivot by ``(cached_translate - pivot) * ratio``
    per axis, using the translations cached in ``translate_list0``.

    Args:
        x_coord/y_coord/z_coord: pivot coordinate widgets.
        x_button/y_button/z_button: explosion ratio widgets.
        a: new widget value supplied by the ui callback (unused).
    """
    try:
        global original_path
        global current_model_path
        global item_count
        global default_pivot
        global item_list0
        global translate_list0
        stage = omni.usd.get_context().get_stage()
        if not current_model_path:
            print("Please select items to explode at first")
            return
        omni.kit.commands.execute('SelectPrims',
            old_selected_paths=[],
            new_selected_paths=[f'{current_model_path[0]}'],
            expand_in_stage=True)
        # Move the explosion centre to the panel coordinates.
        x_position = x_coord.model.get_value_as_float()
        y_position = y_coord.model.get_value_as_float()
        z_position = z_coord.model.get_value_as_float()
        centre_path = f"{current_model_path[0]}/" + "Explosion_Centre"
        omni.kit.commands.execute('TransformPrimSRT',
            path=Sdf.Path(centre_path),
            new_translation=Gf.Vec3d(x_position, y_position, z_position),
            old_translation=Gf.Vec3d(0, 0, 0))
        pivot = stage.GetObjectAtPath(centre_path).GetAttribute('xformOp:translate').Get()
        # Current explosion ratios.
        x_ratio = x_button.model.get_value_as_float()
        y_ratio = y_button.model.get_value_as_float()
        z_ratio = z_button.model.get_value_as_float()
        group_prim = stage.GetPrimAtPath(current_model_path[0])
        children_prims_list = get_pure_list(group_prim.GetChildren())
        # Offset every item; the cached translations pair with the
        # children by index.
        for index, item in enumerate(children_prims_list):
            translate = translate_list0[index]
            item_path = item.GetPrimPath()
            x_distance = (translate[0] - pivot[0]) * x_ratio
            y_distance = (translate[1] - pivot[1]) * y_ratio
            z_distance = (translate[2] - pivot[2]) * z_ratio
            if len(item.GetChildren()) <= 1:
                # Single item: offset from its cached translation.
                new_translation = Gf.Vec3d(translate[0] + x_distance,
                                           translate[1] + y_distance,
                                           translate[2] + z_distance)
            else:
                # Group item: the cached value is its pivot, so the offset
                # itself becomes the new translation.
                new_translation = Gf.Vec3d(x_distance, y_distance, z_distance)
            omni.kit.commands.execute('TransformPrimSRT',
                path=Sdf.Path(item_path),
                new_translation=new_translation,
                old_translation=translate)
        omni.kit.commands.execute('SelectPrims',
            old_selected_paths=[],
            new_selected_paths=[centre_path],
            expand_in_stage=True)
    except Exception:
        # Put the panel back in a known state, then report.
        x_coord.model.set_value(0.0)
        y_coord.model.set_value(0.0)
        z_coord.model.set_value(0.0)
        x_button.model.set_value(0.0)
        y_button.model.set_value(0.0)
        z_button.model.set_value(0.0)
        print("Create a model to explode at first!")
        return
def on_ratio_change(x_button, y_button, z_button, x_coord, y_coord, z_coord, a: float):
    """UI callback: re-explode the model after an explosion ratio changes.

    Same procedure as :func:`on_pivot_change`: move Explosion_Centre to
    the panel coordinates, then offset every item by
    ``(cached_translate - pivot) * ratio`` per axis.

    Args:
        x_button/y_button/z_button: explosion ratio widgets.
        x_coord/y_coord/z_coord: pivot coordinate widgets.
        a: new widget value supplied by the ui callback (unused).
    """
    try:
        global original_path
        global current_model_path
        global item_count
        global default_pivot
        global item_list0
        global translate_list0
        stage = omni.usd.get_context().get_stage()
        if not current_model_path:
            print("Please select items to explode at first")
            return
        omni.kit.commands.execute('SelectPrims',
            old_selected_paths=[],
            new_selected_paths=[f'{current_model_path[0]}'],
            expand_in_stage=True)
        # Move the explosion centre to the panel coordinates.
        x_position = x_coord.model.get_value_as_float()
        y_position = y_coord.model.get_value_as_float()
        z_position = z_coord.model.get_value_as_float()
        centre_path = f"{current_model_path[0]}/" + "Explosion_Centre"
        omni.kit.commands.execute('TransformPrimSRT',
            path=Sdf.Path(centre_path),
            new_translation=Gf.Vec3d(x_position, y_position, z_position),
            old_translation=Gf.Vec3d(0, 0, 0))
        pivot = stage.GetObjectAtPath(centre_path).GetAttribute('xformOp:translate').Get()
        # Current explosion ratios.
        x_ratio = x_button.model.get_value_as_float()
        y_ratio = y_button.model.get_value_as_float()
        z_ratio = z_button.model.get_value_as_float()
        group_prim = stage.GetPrimAtPath(current_model_path[0])
        children_prims_list = get_pure_list(group_prim.GetChildren())
        # Offset every item; the cached translations pair with the
        # children by index.
        for index, item in enumerate(children_prims_list):
            translate = translate_list0[index]
            item_path = item.GetPrimPath()
            x_distance = (translate[0] - pivot[0]) * x_ratio
            y_distance = (translate[1] - pivot[1]) * y_ratio
            z_distance = (translate[2] - pivot[2]) * z_ratio
            if len(item.GetChildren()) <= 1:
                # Single item: offset from its cached translation.
                new_translation = Gf.Vec3d(translate[0] + x_distance,
                                           translate[1] + y_distance,
                                           translate[2] + z_distance)
            else:
                # Group item: the cached value is its pivot, so the offset
                # itself becomes the new translation.
                new_translation = Gf.Vec3d(x_distance, y_distance, z_distance)
            omni.kit.commands.execute('TransformPrimSRT',
                path=Sdf.Path(item_path),
                new_translation=new_translation,
                old_translation=translate)
        omni.kit.commands.execute('SelectPrims',
            old_selected_paths=[],
            new_selected_paths=[centre_path],
            expand_in_stage=True)
    except Exception:
        # Put the panel back in a known state, then report.
        x_coord.model.set_value(0.0)
        y_coord.model.set_value(0.0)
        z_coord.model.set_value(0.0)
        x_button.model.set_value(0.0)
        y_button.model.set_value(0.0)
        z_button.model.set_value(0.0)
        print("Create a model to explode at first!")
        return
#-------------------------------------------------SECONDARY FUNCTION------------------------------------------------------
def hide_unhide_original_model():
    """Toggle visibility of the prims recorded in ``original_path``.

    Behavior (kept from the original implementation):
    - exactly one prim visible  -> hide the first recorded prim
    - some (but not all) hidden -> show every prim
    - all prims visible         -> hide every prim
    """
    try:
        global original_path
        global item_count
        stage = omni.usd.get_context().get_stage()
        # Count prims whose visibility token is 'inherited' (i.e. shown).
        visible_count = 0
        for prim_path in original_path:
            obj = stage.GetObjectAtPath(prim_path)
            if obj.GetAttribute('visibility').Get() == "inherited":
                visible_count += 1
        if visible_count == 1:
            # Single visible prim: hide the first entry.
            # NOTE(review): assumes original_path holds one prim in this
            # case — confirm against the capture code.
            omni.kit.commands.execute('ChangeProperty',
                prop_path=Sdf.Path(f"{original_path[0]}.visibility"),
                value='invisible',
                prev=None)
        elif visible_count < item_count:
            # Partially hidden: show everything.
            for prim_path in original_path:
                omni.kit.commands.execute('ChangeProperty',
                    prop_path=Sdf.Path(f"{prim_path}.visibility"),
                    value='inherited',
                    prev=None)
        elif visible_count == item_count:
            # Everything visible: hide everything. (The old comment on this
            # branch wrongly said "All light".)
            for prim_path in original_path:
                omni.kit.commands.execute('ChangeProperty',
                    prop_path=Sdf.Path(f"{prim_path}.visibility"),
                    value='invisible',
                    prev=None)
        return
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt propagates.
        print("Cannot find ORIGINAL prims to hide or show!")
        return
def set_camera():
    """Create (or select) an axonometric camera at /World/Axonometric_View.

    If the camera already exists it is selected and nothing else happens.
    Otherwise the active viewport camera is duplicated, moved to
    /World/Axonometric_View, and given a long focal length so the view
    approximates an axonometric projection.
    """
    stage = omni.usd.get_context().get_stage()
    world = stage.GetObjectAtPath('/World')
    children_refs = world.GetChildren()
    # Reuse an existing axonometric camera if one is already in the stage.
    for i in children_refs:
        path = str(i)
        if path.find('/World/Axonometric_View') != -1:
            print("Axonometric camera already existed!")
            omni.kit.commands.execute('SelectPrims',
                old_selected_paths=[],
                new_selected_paths=['/World/Axonometric_View'],
                expand_in_stage=True)
            return
    # Duplicate the current viewport camera; the duplicate becomes the selection.
    omni.kit.commands.execute('DuplicateFromActiveViewportCameraCommand',
        viewport_name='Viewport')
    # NOTE(review): this extra /World/Camera prim appears unused afterwards —
    # confirm whether it is required by the duplicate command or leftover code.
    omni.kit.commands.execute('CreatePrim',
        prim_path='/World/Camera',
        prim_type='Camera')
    selected_prim_path = omni.usd.get_context().get_selection().get_selected_prim_paths()
    camera_path = selected_prim_path
    # NOTE(review): the selection is moved twice to the same destination
    # (MovePrims, then MovePrim from the already-moved source path); the
    # second call presumably no-ops or fails silently — verify which one
    # is actually intended before touching this sequence.
    omni.kit.commands.execute('MovePrims',
        paths_to_move={camera_path[0]: '/World/Axonometric_View'})
    omni.kit.commands.execute('MovePrim',
        path_from=camera_path[0],
        path_to='/World/Axonometric_View',
        time_code=Usd.TimeCode.Default(),
        keep_world_transform=True)
    # A very long focal length flattens perspective, approximating axonometry.
    omni.kit.commands.execute('ChangeProperty',
        prop_path=Sdf.Path('/World/Axonometric_View.focalLength'),
        value=500.0,
        prev=0)
    return
def reset_model(x_coord, y_coord, z_coord, x_ratio, y_ratio, z_ratio):
    """Reset the panel: pivot fields back to the cached default pivot and
    all explosion ratios back to 0.

    Requires the global ``default_pivot`` to have been computed by a
    previous model operation; otherwise an error message is printed.
    """
    try:
        global default_pivot
        x_coord.model.set_value(default_pivot[0])
        y_coord.model.set_value(default_pivot[1])
        z_coord.model.set_value(default_pivot[2])
        # Zero the ratios only if any of them is non-zero, avoiding
        # needless widget updates that would re-trigger ui callbacks.
        x = x_ratio.model.get_value_as_float()
        y = y_ratio.model.get_value_as_float()
        z = z_ratio.model.get_value_as_float()
        if not (x == 0.0 and y == 0.0 and z == 0.0):
            x_ratio.model.set_value(0.0)
            y_ratio.model.set_value(0.0)
            z_ratio.model.set_value(0.0)
        return
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt propagates.
        print("Create a model to explode at first!")
        return
def clear(x_coord, y_coord, z_coord, x_ratio, y_ratio, z_ratio):
    """Delete the exploded model prim and reset all cached state and panel values."""
    try:
        global original_path
        global current_model_path
        global item_count
        global default_pivot
        global item_list0
        global translate_list0
        omni.kit.commands.execute('DeletePrims',
            paths=[current_model_path[0]])
        # Drop every piece of cached state so a new model starts clean.
        original_path = None
        current_model_path = None
        item_count = None
        default_pivot = None
        item_list0 = None
        translate_list0 = None
        # Zero the panel widgets.
        x_coord.model.set_value(0.0)
        y_coord.model.set_value(0.0)
        z_coord.model.set_value(0.0)
        x_ratio.model.set_value(0.0)
        y_ratio.model.set_value(0.0)
        z_ratio.model.set_value(0.0)
        print("All data clear")
        return
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt propagates.
        print("Create a model to explode at first!")
        return
| 42,513 | Python | 37.094982 | 126 | 0.532002 |
HC2ER/OmniverseExtension-hnadi.tools.exploded_view/hnadi/tools/exploded_view/extension.py | import omni.ext
import omni.usd
from .exploded_view_ui import Cretae_UI_Framework
class Main_Entrance(omni.ext.IExt):
    """Extension entry point: builds the exploded-view window on startup
    and tears it down on shutdown."""

    def on_startup(self, ext_id):
        # Cretae_UI_Framework (sic — spelling kept from the UI module)
        # attaches the created ui.Window to self._window.
        Cretae_UI_Framework(self)

    def on_shutdown(self):
        print("[hnadi.tools.exploded_view] shutdown")
        self._window.destroy()
        self._window = None
        # Removed an unused `stage = omni.usd.get_context().get_stage()`
        # lookup that served no purpose during shutdown.
def get_name_from_path(path: str) -> str:
    """Return the last component of a "/"-separated prim path.

    Raises:
        ValueError: if *path* contains no "/" (matching the behavior of
        the previous reversed-list implementation).
    """
    # rindex locates the last "/" directly; no need to reverse the string
    # and translate the index back as the old implementation did.
    return path[path.rindex("/") + 1:]
def get_pure_list(list: list) -> list:
    """Return *list* without the Explosion_Centre marker prim, order kept.

    A prim is filtered out when the last component of its path is exactly
    "Explosion_Centre". (Parameter name ``list`` shadows the builtin; it
    is kept for call compatibility.)
    """
    new_list = []
    for prim in list:
        full_path = str(prim.GetPrimPath())
        # Compare only the final path component so prims whose path merely
        # contains the string are not filtered out.
        if full_path.rsplit("/", 1)[-1] != "Explosion_Centre":
            new_list.append(prim)
    return new_list
HC2ER/OmniverseExtension-hnadi.tools.exploded_view/hnadi/tools/exploded_view/exploded_view_style.py | # Copyright (c) 2022, HNADIACE. All rights reserved.
__all__ = ["HNADI_window_style"]
from omni.ui import color as cl
from omni.ui import constant as fl
from omni.ui import url
import omni.kit.app
import omni.ui as ui
import pathlib
EXTENSION_FOLDER_PATH = pathlib.Path(
omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__)
)
## Color presets ##
#主题色
main_color = cl.hnadi_color = cl("#F5B81B")
#主字体色
white = cl.hnadi_text_color = cl("#DADADA") # 最浅色
#窗口
cl.window_label_bg = cl("#0F0F0F") # 窗口标题背景色
cl.window_bg = cl("#252525") # 窗口背景色,60~90%透明度(透明度不知道定义)
#折叠框架
cl.clloapsible_bg_label = cl("#252525")
#按钮
cl.button_bg = cl("#252525") # 常规背景色+边框#9393939,1px
cl.button_bg_hover = cl("#98999C")
cl.button_bg_click = cl("#636363")
cl.button_label = cl("#939393") # 按钮常规字体颜色
cl.button_label_hover = cl("#383838") # 按钮悬停时字体颜色
cl.button_label_click = cl("#DADADA")
#下拉框
cl.combobox_bg = cl("#252525")
cl.combobox_label = cl("#939393")
cl.combobox_bg_hover = cl("#0F0F0F")
cl.combobox_label_hover = cl("#AFAFAF")
#勾选框/还原按钮
cl.revert_arrow_enabled = cl("#AFAFAF") # 启用状态
cl.revert_arrow_disabled = cl("#383838") # 禁用状态
cl.checkbox_hover = cl("#DADADA")
cl.checkbox_click = cl("#F5B81B")
#边界线框
border_color = cl.border = cl("#636363") # 1px-2px厚度
#滑块
cl.slider_fill = cl("#F5B81B") # 滑块填充色,主题色
cl.slider_bg = cl("#252525")
cl.floatslider_sele = cl("#BB8E1A") # 滑块点击效果
cl.slider_text_color = cl("98999C")
#还原按钮
cl.revert_arrow_enabled = cl("#F5B81B") # 启用状态
cl.revert_arrow_disabled = cl("#383838") # 禁用状态
#好像用不到的
cl.transparent = cl(0, 0, 0, 0)
# HC Color
black = cl("#252525")
white = cl("#FFFFFF")
cls_temperature_gradient = [cl("#fe0a00"), cl("#f4f467"), cl("#a8b9ea"), cl("#2c4fac"), cl("#274483"), cl("#1f334e")]
## Spacing presets ##
fl.window_attr_hspacing = 8 # 文字与功能框间距(全部)
fl.window_attr_spacing = 4 # 纵向间距
fl.group_spacing = 4 # 组间间距
fl.spacing = 4
fl.border_radius = 4
fl.border_width = 1
## Font sizes ##
fl.window_title_font_size = 18
fl.collapsable_font_size = 16
fl.text_font_size = 14
## Icon and image URLs ##
url.icon_achiview = f"{EXTENSION_FOLDER_PATH}/image/achi_view.png"
url.icon_achiview_click = f"{EXTENSION_FOLDER_PATH}/image/achi_view_click.png"
url.icon_bowlgenerator = f"{EXTENSION_FOLDER_PATH}/image/bowl_generator.png"
url.icon_bowlgenerator_click = f"{EXTENSION_FOLDER_PATH}/image/bowl_generator_click.png"
url.icon_buildingblock = f"{EXTENSION_FOLDER_PATH}/image/building_block.png"
url.icon_buildingblock_click = f"{EXTENSION_FOLDER_PATH}/image/building_blockc_click.png"
url.icon_draincurve = f"{EXTENSION_FOLDER_PATH}/image/drain_curve.png"
url.icon_draincurve_click = f"{EXTENSION_FOLDER_PATH}/image/drain_curve_click.png"
url.icon_explodedview = f"{EXTENSION_FOLDER_PATH}/image/exploded_view.png"
url.icon_explodedview_click = f"{EXTENSION_FOLDER_PATH}/image/exploded_view_click.png"
url.icon_isochronouscircle = f"{EXTENSION_FOLDER_PATH}/image/isochronouscircle.png"
url.icon_isochronouscircle_click = f"{EXTENSION_FOLDER_PATH}/image/isochronouscircle_click.png"
url.icon_light_studio = f"{EXTENSION_FOLDER_PATH}/image/light_studio.png"
url.icon_lightstudio_click = f"{EXTENSION_FOLDER_PATH}/image/light_studio_click.png"
url.icon_solarpanel = f"{EXTENSION_FOLDER_PATH}/image/solar_panel.png"
url.icon_solarpanel_click = f"{EXTENSION_FOLDER_PATH}/image/solar_panel_click.png"
url.closed_arrow_icon = f"{EXTENSION_FOLDER_PATH}/image/closed.svg"
url.radio_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/image/Slice 3.png"
url.radio_btn_off_icon = f"{EXTENSION_FOLDER_PATH}/image/Slice 1.png"
url.radio_btn_hovered_icon = f"{EXTENSION_FOLDER_PATH}/image/Slice 2.png"
## Style format notes ##
"""
"Button":{"border_width":0.5}           # 1 — style every widget of a type: "WidgetType":{}
"Button::B1":{XXXX}                     # 2 — style one named instance: "WidgetType::InstanceName":{}
"Button::B1:hovered/pressed":{XXXX}     # 3 — style one state of an instance: "WidgetType::InstanceName:State":{}
"Button.Label::B1":{}                   # 4 — style one attribute of an instance: "WidgetType.AttributeName::InstanceName":{}
"""
HNADI_window_style = {
# 属性字体 attribute_name
"Label::attribute_name": {
"alignment": ui.Alignment.RIGHT_CENTER,
"margin_height": fl.window_attr_spacing,
"margin_width": fl.window_attr_hspacing,
"color": cl.button_label,
},
"Label::attribute_name:hovered": {"color": cl.hnadi_text_color},
# 可折叠标题
# 可折叠标题文字 collapsable_name
"Label::collapsable_name": {
"alignment": ui.Alignment.LEFT_CENTER,
"color": cl.hnadi_text_color,
"font_size": fl.collapsable_font_size,
},
# 可折叠标题命名(间隔属性) group
"CollapsableFrame::group": {"margin_height": fl.group_spacing},
# HeaderLine 线
"HeaderLine": {"color": cl(.5, .5, .5, .5)},
# 滑杆
"Slider": {
"border_radius": fl.border_radius,
"color": cl.slider_text_color,
"background_color": cl.slider_bg,
"secondary_color": cl.slider_fill,
"secondary_selected_color": cl.floatslider_sele,
"draw_mode": ui.SliderDrawMode.HANDLE,
},
# FloatSlider attribute_float
"Slider::attribute_float": {"draw_mode": ui.SliderDrawMode.FILLED},
"Slider::attribute_float:hovered": {
"color": cl.slider_text_color,
"background_color": cl.slider_bg
},
"Slider::attribute_float:pressed": {"color": cl.slider_text_color},
# IntSlider attribute_int
"Slider::attribute_int": {
"secondary_color": cl.slider_fill,
"secondary_selected_color": cl.floatslider_sele,
},
"Slider::attribute_int:hovered": {"color": cl.slider_text_color},
"Slider::attribute_float:pressed": {"color": cl.slider_text_color},
# 按钮 tool_button
"Button::tool_button": {
"background_color": cl.button_bg,
"border_width": fl.border_width,
"border_color": cl.border,
"border_radius": fl.border_radius,
},
"Button::tool_button:hovered": {"background_color": cl.button_bg_hover},
"Button::tool_button:pressed": {"background_color": cl.button_bg_click},
"Button::tool_button:checked": {"background_color": cl.button_bg_click},
"Button::tool_button:pressed": {"background_color": cl.slider_fill},
"Button.Label::tool_button:hovered": {"color": cl.button_label_hover},
"Button.Label::tool_button:pressed": {"color": white},
"Button.Label::tool_button": {"color": cl.button_label},
# # 图片按钮 image_button
# "Button::image_button": {
# "background_color": cl.transparent,
# "border_radius": fl.border_radius,
# "fill_policy": ui.FillPolicy.PRESERVE_ASPECT_FIT,
# },
# "Button.Image::image_button": {
# "image_url": url.icon_achiview,
# "alignment": ui.Alignment.CENTER_TOP,
# "border_radius": fl.border_radius,
# },
# "Button.Image::image_button:checked": {"image_url": url.icon_achiview_click},
# "Button::image_button:hovered": {"background_color": cl.button_bg_hover},
# "Button::image_button:pressed": {"background_color": cl.button_bg_click},
# "Button::image_button:checked": {"background_color": cl.imagebutton_bg_click},
# Field attribute_field
"Field": {
"background_color": cl.slider_bg,
"border_radius": fl.border_radius,
"border_color": cl.border,
"border_width": fl.border_width,
},
"Field::attribute_field": {
"corner_flag": ui.CornerFlag.RIGHT,
"font_size": fl.text_font_size,
},
"Field::attribute_field:hovered":{"background_color": cl.combobox_bg_hover},
"Field::attribute_field:pressed":{"background_color": cl.combobox_bg_hover},
# cl.slider_fill
# # 下拉框
"Rectangle::box": {
"background_color": cl.slider_fill,
"border_radius": fl.border_radius,
"border_color": cl.slider_fill,
"border_width": 0,
"color": cl.combobox_label,
},
# "ComboBox::dropdown_menu":{
# "background_color": cl.combobox_bg,
# "secondary_color": 0x0,
# "font_size": fl.text_font_size,
# },
# "ComboBox::dropdown_menu:hovered":{
# "color": cl.combobox_label_hover,
# "background_color": cl.combobox_bg_hover,
# "secondary_color": cl.combobox_bg_hover,
# },
# "ComboBox::dropdown_menu:pressed":{
# "background_color": cl.combobox_bg_hover,
# "border_color": cl.border,
# },
# "Rectangle::combobox_icon_cover": {"background_color": cl.field_bg},
# RadioButtion
# "Button::radiobutton":{
# "background_color":cl.transparent,
# "image_url": url.radio_btn_off_icon,
# },
# "Button::radiobutton:pressed":{"image_url": url.radio_btn_on_icon},
# "Button::radiobutton:checked":{"image_url": url.radio_btn_on_icon},
#图片
# "Image::radio_on": {"image_url": url.radio_btn_on_icon},
# "Image::radio_off": {"image_url": url.radio_btn_off_icon},
# "Image::collapsable_opened": {"color": cl.example_window_text, "image_url": url.example_window_icon_opened},
# "Image::collapsable_closed": {"color": cl.example_window_text, "image_url": url.example_window_icon_closed},
# "Image::collapsable_closed": {
# "color": cl.collapsible_header_text,
# "image_url": url.closed_arrow_icon,
# },
# "Image::collapsable_closed:hovered": {
# "color": cl.collapsible_header_text_hover,
# "image_url": url.closed_arrow_icon,
# },
}
| 9,302 | Python | 35.482353 | 117 | 0.65459 |
HC2ER/OmniverseExtension-hnadi.tools.exploded_view/hnadi/tools/exploded_view/exploded_view_ui.py | from os import path
from data.image_path import image_path
from functools import partial
import omni.ui as ui
from .exploded_view import select_explode_Xform, on_ratio_change, on_pivot_change, remove_item, add_item, bind_item, unbind_item, hide_unhide_original_model, reset_model, clear, set_camera
from .exploded_view_style import HNADI_window_style, main_color, white, border_color
# Connect to Extension
# NOTE(review): despite subclassing ui.Window, __init__ immediately rebinds
# ``self`` to ``transformer`` and never calls super().__init__(), so every
# attribute below (``self._window`` etc.) is stored on the transformer object,
# not on the instance being constructed. Looks intentional — confirm.
class Cretae_UI_Framework(ui.Window):
    def __init__(self, transformer) -> None:
        """Build the "Exploded View" window onto *transformer* and wire all callbacks."""
        self = transformer
        spacer_distance = distance = 6
        overall_width = 380
        overall_height = 395
        self._window = ui.Window("Exploded View", width=overall_width, height=overall_height)
        with self._window.frame:
            with ui.VStack(style=HNADI_window_style):
                # Column1 Main Functions UI
                with ui.HStack(height = 170):
                    # two big buttons
                    ui.Spacer(width=6)
                    with ui.VStack(width = 120):
                        ui.Spacer(height = distance - 1)
                        select_button = create_button_type1(name="Select Prims", tooltip="Select a group or all items at once to explode.", pic=image_path.select, height=102, spacing=-45)
                        ui.Spacer(height = 4)
                        camera_button = create_button_type1_1(name="Axono", tooltip="Set an axonometirc camera.", pic=image_path.Axono, height=53, spacing=-20)
                    ui.Spacer(width = 10)
                    # four main control sliders
                    with ui.VStack():
                        ui.Spacer(height = distance+2)
                        x_button = create_floatfield_ui(label="X ratio", tooltip="Explosion distance ratio in X direction", max=100.0)
                        ui.Spacer(height = 2)
                        y_button = create_floatfield_ui(label="Y ratio", tooltip="Explosion distance ratio in Y direction", max=100.0)
                        ui.Spacer(height = 2)
                        z_button = create_floatfield_ui(label="Z ratio", tooltip="Explosion distance ratio in Z direction", max=100.0)
                        ui.Spacer(height = 2)
                        ui.Spacer(height = 4)
                        # pivot (explosion centre) coordinate row
                        with ui.HStack():
                            ui.Label("Pivot", name="attribute_name", width = 40, height=25, tooltip="Coordinates of the Explosion_Centre")
                            ui.Spacer(width=10)
                            with ui.HStack():
                                x_coord = create_coord_ui(name="X", color=0xFF5555AA)
                                ui.Spacer(width=10)
                                y_coord = create_coord_ui(name="Y", color=0xFF76A371)
                                ui.Spacer(width=10)
                                z_coord = create_coord_ui(name="Z", color=0xFFA07D4F)
                    ui.Spacer(width=6)
                # Column2 Edit Functions UI
                with ui.CollapsableFrame("Edit Exploded Model", name="group", build_header_fn=_build_collapsable_header):
                    with ui.VStack():
                        with ui.HStack():
                            ui.Spacer(width=6)
                            add_button = create_button_type2(name="Add", tooltip="Add items into the Exploded_Model.", pic=image_path.add1, spacing=-85)
                            ui.Spacer(width=1)
                            bind_button = create_button_type2(name="Bind", tooltip="Bind items together to keep their relative distances during explosion.", pic=image_path.bind, spacing=-75)
                            ui.Spacer(width=6)
                        with ui.HStack():
                            ui.Spacer(width=6)
                            remove_button = create_button_type2(name="Remove", tooltip="Remove items from the Exploded_Model", pic=image_path.remove1, spacing=-85)
                            ui.Spacer(width=1)
                            unbind_button = create_button_type2(name="Unbind", tooltip="Unbind items.", pic=image_path.unbind, spacing=-75)
                            ui.Spacer(width=6)
                        ui.Spacer(height = 5)
                ui.Spacer(height = 6)
                # Column3 Other Functions UI
                with ui.HStack():
                    ui.Spacer(width=6)
                    hideorshow_button = create_button_type3(tooltip="Hide or show the ORIGINAL prims.", pic=image_path.hide_show)
                    reset_button = create_button_type3(tooltip="Reset the Exploded_Model.", pic=image_path.reset)
                    clear_button = create_button_type3(tooltip="Delete the Exploded_Model and all data.", pic=image_path.clear)
                    ui.Spacer(width=6)
        # Connect functions to button
        # (every slider/field callback receives all six widgets so handlers can
        # read ratios and pivot coordinates together)
        select_button.set_clicked_fn(partial(select_explode_Xform, x_coord, y_coord, z_coord, x_button, y_button, z_button))
        camera_button.set_clicked_fn(set_camera)
        x_button.model.add_value_changed_fn(partial(on_ratio_change, x_button, y_button, z_button, x_coord, y_coord, z_coord))
        y_button.model.add_value_changed_fn(partial(on_ratio_change, x_button, y_button, z_button, x_coord, y_coord, z_coord))
        z_button.model.add_value_changed_fn(partial(on_ratio_change, x_button, y_button, z_button, x_coord, y_coord, z_coord))
        x_coord.model.add_value_changed_fn(partial(on_pivot_change, x_coord, y_coord, z_coord, x_button, y_button, z_button))
        y_coord.model.add_value_changed_fn(partial(on_pivot_change, x_coord, y_coord, z_coord, x_button, y_button, z_button))
        z_coord.model.add_value_changed_fn(partial(on_pivot_change, x_coord, y_coord, z_coord, x_button, y_button, z_button))
        add_button.set_clicked_fn(partial(add_item, x_coord, y_coord, z_coord, x_button, y_button, z_button))
        remove_button.set_clicked_fn(partial(remove_item, x_coord, y_coord, z_coord, x_button, y_button, z_button))
        bind_button.set_clicked_fn(partial(bind_item, x_coord, y_coord, z_coord, x_button, y_button, z_button))
        unbind_button.set_clicked_fn(partial(unbind_item, x_coord, y_coord, z_coord, x_button, y_button, z_button))
        hideorshow_button.set_clicked_fn(hide_unhide_original_model)
        reset_button.set_clicked_fn(partial(reset_model, x_coord, y_coord, z_coord, x_button, y_button, z_button))
        clear_button.set_clicked_fn(partial(clear, x_coord, y_coord, z_coord, x_button, y_button, z_button))
def _build_collapsable_header(collapsed, title):
    """Render the custom title row of a CollapsableFrame (label + arrow icon)."""
    with ui.VStack():
        ui.Spacer(height=5)
        with ui.HStack():
            ui.Label(title, name="collapsable_name")
            # Pick the arrow image that matches the current collapsed state.
            image_name = "collapsable_opened" if collapsed else "collapsable_closed"
            ui.Image(name=image_name, width=10, height=10)
        ui.Spacer(height=5)
        ui.Line(style_type_name_override="HeaderLine")
# UI button style
def create_coord_ui(color:str, name:str):
    """Create one labelled coordinate widget (tag + FloatDrag) and return the drag field."""
    tag_style = {"background_color": main_color, "border_radius": 3}
    with ui.ZStack(width=13, height=25):
        ui.Rectangle(name="vector_label", width=15, style=tag_style)
        ui.Label(name, alignment=ui.Alignment.CENTER, style={"color": white})
    return ui.FloatDrag(min=-99999999.9, max=99999999.9)
def create_floatfield_ui(label:str, max:float, tooltip:str, min=0.0):
    """Create a labelled FloatField row, initialised to 0.0, and return the field."""
    with ui.HStack():
        ui.Label(label, name="attribute_name", width=40, height=25, tooltip=tooltip)
        ui.Spacer(width=1.5)
        field = ui.FloatField(min=min, max=max, height=25)
    field.model.set_value(0.0)
    return field
def create_button_type1(name, tooltip, pic, height, spacing=-45):
    """Create a tall image-on-top button (label below the icon) and return it."""
    button_style = {
        "Button": {"stack_direction": ui.Direction.TOP_TO_BOTTOM},
        "Button.Label": {"alignment": ui.Alignment.CENTER_BOTTOM},
        "border_radius": 4,
        "Button.Image": {
            "image_url": pic,
            "alignment": ui.Alignment.CENTER_BOTTOM,
        },
        # Highlight with a gradient while the cursor hovers the button.
        ":hovered": {
            "background_gradient_color": main_color,
            "background_color": 0X500066FF,
        },
    }
    button = ui.Button(name, height=height, width=120, tooltip=tooltip, style=button_style)
    button.spacing = spacing
    return button
def create_button_type1_1(name, tooltip, pic, height, spacing=-20):
    """Create a wide icon-beside-label button (horizontal layout) and return it."""
    button_style = {
        "Button": {"stack_direction": ui.Direction.LEFT_TO_RIGHT},
        "Button.Image": {
            "image_url": pic,
            "alignment": ui.Alignment.CENTER_BOTTOM,
        },
        "Button.Label": {"alignment": ui.Alignment.CENTER},
        "border_radius": 4,
        # Hover highlight matches the other type-1 buttons.
        ":hovered": {
            "background_gradient_color": main_color,
            "background_color": 0X500066FF,
        },
    }
    button = ui.Button(name, height=height, width=120, tooltip=tooltip, style=button_style)
    button.spacing = spacing
    return button
def create_button_type2(name, tooltip, pic, height=40, spacing=-75):
    """Create a horizontal icon+label edit button (Add/Remove/Bind/Unbind) and return it.

    Args:
        name: Button label text.
        tooltip: Hover tooltip.
        pic: Path of the icon image.
        height: Button height in pixels.
        spacing: Gap between icon and label (negative pulls them together).
    """
    style = {
        "Button": {"stack_direction": ui.Direction.LEFT_TO_RIGHT},
        "Button.Image": {
            "image_url": pic,
            "alignment": ui.Alignment.CENTER,
        },
        "Button.Label": {"alignment": ui.Alignment.CENTER},
        "background_color": 0x10CCCCCC,
        ":hovered": {
            "background_gradient_color": 0X500066FF,
            "background_color": main_color,
        },
        # Fixed: selector was "Button:pressed:" (trailing colon) which never
        # matched, so the pressed background was silently ignored.
        "Button:pressed": {"background_color": 0xff000000},
    }
    button = ui.Button(name, height=height, tooltip=tooltip, style=style)
    button.spacing = spacing
    return button
def create_button_type3(tooltip, pic, height=50):
    """Create a label-less icon button (bottom row tools) and return it."""
    button_style = {
        "Button": {"stack_direction": ui.Direction.TOP_TO_BOTTOM},
        "Button.Image": {
            "image_url": pic,
            "alignment": ui.Alignment.CENTER,
        },
        "Button.Label": {"alignment": ui.Alignment.CENTER},
        "border_radius": 4,
        # Hover highlight consistent with the rest of the window.
        ":hovered": {
            "background_gradient_color": main_color,
            "background_color": 0X500066FF,
        },
    }
    return ui.Button("", height=height, style=button_style, tooltip=tooltip)
HC2ER/OmniverseExtension-hnadi.tools.exploded_view/data/image_path.py | from os import path
class image_path:
    """Absolute paths of the extension's icon assets, which live beside this module."""
    D = path.dirname(__file__)
    add1 = "/".join((D, "add1.png"))
    Axono = "/".join((D, "Axono.png"))
    bind = "/".join((D, "bind.png"))
    clear = "/".join((D, "clear.png"))
    hide_show = "/".join((D, "hide&show.png"))
    preview = "/".join((D, "preview.png"))
    remove1 = "/".join((D, "remove1.png"))
    reset = "/".join((D, "reset.png"))
    select = "/".join((D, "select.png"))
    title = "/".join((D, "title.png"))
    unbind = "/".join((D, "unbind.png"))
| 407 | Python | 21.666665 | 36 | 0.525799 |
gazebosim/gz-omni/tools/repoman/package.py | import os
import sys
import packmanapi
packagemaker_path = packmanapi.install("packagemaker", package_version="4.0.0-rc9", link_path='_packages/packagemaker')
sys.path.append('_packages/packagemaker')
import packagemaker
def main():
    """Describe and build the unsigned 'samples' package from Windows release binaries."""
    pkg = packagemaker.PackageDesc()
    pkg.name = 'samples'
    # CI supplies BUILD_NUMBER; local builds fall back to version '0'.
    pkg.version = os.getenv('BUILD_NUMBER', '0')
    pkg.output_folder = '_unsignedpackages'
    pkg.files = [f'_build/windows-x86_64/release/*.{ext}' for ext in ('exe', 'dll')]
    packagemaker.package(pkg)
# Run when executed directly; "__mp_main__" covers re-import under
# multiprocessing's spawn start method.
if __name__ == '__main__' or __name__ == '__mp_main__':
    main()
| 614 | Python | 22.653845 | 119 | 0.643322 |
gazebosim/gz-omni/tools/repoman/repoman.py | import os
import sys
import packmanapi
# Resolve repo-root-relative paths from this script's own location.
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_ROOT_DIR = os.path.join(SCRIPT_DIR, "..", "..")
HOST_DEPS_PATH = os.path.join(REPO_ROOT_DIR, "_build", "host-deps")
# Fetch the shared "repo_repoman" tooling via packman and make it importable.
repoman_link_path = os.path.abspath(os.path.join(HOST_DEPS_PATH, "nvtools_repoman"))
packmanapi.install("repo_repoman", package_version="0.1.1-beta2", link_path=repoman_link_path)
sys.path.append(repoman_link_path)
# 'api' is provided by the package installed above; must import after the path append.
import api
| 448 | Python | 27.062498 | 94 | 0.720982 |
gazebosim/gz-omni/tools/repoman/findwindowsbuildtools.py | import os
import sys
import argparse
import subprocess
import json
from xml.etree import ElementTree
import repoman
import packmanapi
# Packman dependency spec: package name -> pinned version and host link path.
DEPS = {
    "nvtools_build": {
        "version": "0.2.0",
        "link_path_host": "nvtools_build",
    }
}
'''
buildtools: C:/Program Files (x86)/Microsoft Visual Studio/2019/BuildTools
vc: ../_build/host-deps/buildtools/VC/Tools/MSVC/14.27.29110
'''
def update_host_deps(host_deps_path:str, vs_path:str = "", msvc_ver:str = ""):
    """Rewrite the host_deps packman XML so its VS/MSVC/WinSDK entries point at *vs_path*.

    Args:
        host_deps_path: Path to the host_deps.packman.xml file to update in place.
        vs_path: Visual Studio installation root (empty string leaves it untouched).
        msvc_ver: MSVC toolset version folder name, e.g. "14.27.29110".
    """
    # Preserving comments in the XML host_deps file
    # credit: https://stackoverflow.com/questions/33573807/faithfully-preserve-comments-in-parsed-xml
    class CommentedTreeBuilder(ElementTree.TreeBuilder):
        def comment(self, data):
            self.start(ElementTree.Comment, {})
            self.data(data)
            self.end(ElementTree.Comment)
    parser = ElementTree.XMLParser(target=CommentedTreeBuilder())
    # python 3.8 adds insert_comments
    #parser = ElementTree.XMLParser(target=ElementTree.TreeBuilder(insert_comments=True))
    # vc path is written relative to the tools directory the XML lives under.
    vc_path = os.path.join("..", "_build", "host-deps", "buildtools", "VC", "Tools", "MSVC", msvc_ver)
    # Use winsdk.bat from Visual Studio tools to find the Windows SDK
    windows_sdk_dir = ""
    windows_sdk_ver = ""
    windows_sdk_bin_dir = ""
    windows_sdk_lib_dir = ""
    windows_sdk_include_dir = ""
    winsdk_bat_path = os.path.join(vs_path, "Common7", "Tools", "vsdevcmd", "core", "winsdk.bat")
    if os.path.exists(winsdk_bat_path):
        # We have a batch wrapper that calls the winsdk.bat file and emits the important env vars to be processed
        script_path = os.path.split(os.path.abspath(__file__))[0]
        cmd_line = []
        cmd_line.append(os.path.join(script_path, "print_winsdk_env_vars.bat"))
        cmd_line.append(winsdk_bat_path)
        completed = subprocess.run(cmd_line, capture_output=True)
        # Scrape the SDK dir and version out of the batch file's stdout.
        for line in completed.stdout.decode().splitlines():
            if "WindowsSDKDir" in line:
                windows_sdk_dir = line.split("=")[1].rstrip("\\")
            elif "WindowsSdkVersion" in line:
                windows_sdk_ver = line.split("=")[1].rstrip("\\")
        if os.path.exists(windows_sdk_dir):
            windows_sdk_bin_dir = os.path.join(windows_sdk_dir, "bin", windows_sdk_ver)
            windows_sdk_include_dir = os.path.join(windows_sdk_dir, "include", windows_sdk_ver)
            windows_sdk_lib_dir = os.path.join(windows_sdk_dir, "lib", windows_sdk_ver)
    # Read the XML tree from the host_deps file
    tree = ElementTree.parse(host_deps_path, parser)
    root = tree.getroot()
    # Replace the builtools and vc paths
    find_replace_dict = {
        "buildtools": vs_path,
        "vc": vc_path,
        "winsdk": windows_sdk_dir,
        "winsdk_bin": windows_sdk_bin_dir,
        "winsdk_include": windows_sdk_include_dir,
        "winsdk_lib": windows_sdk_lib_dir,
    }
    # Patch each matching <dependency name="..."><source path="..."/> entry;
    # empty replacement values are skipped so existing paths are preserved.
    for dependency in root.findall("dependency"):
        for find_key in find_replace_dict.keys():
            if "name" in dependency.attrib.keys() and find_key == dependency.attrib["name"] and find_replace_dict[find_key] != "":
                for source in dependency.iter("source"):
                    source.attrib["path"] = find_replace_dict[find_key]
                    print("Updating <%s> attribute with <%s>" % (dependency.attrib["name"],source.attrib["path"]))
    tree.write(host_deps_path)
'''
find_vs will search through the display names of the installed Visual Studio versions and
return the installation path for the first one that matches the input string provided
current display names:
* Visual Studio Community 2019
* Visual Studio Professional 2019
* Visual Studio Professional 2017
'''
def find_vs(search_str:str, listall:bool = False) -> str:
    """Locate a Visual Studio installation via vswhere.exe.

    Returns the installation path of the first install whose displayName
    contains *search_str*; with *listall*, prints every displayName and
    returns the last installation path seen. Returns None on failure.
    """
    program_files = os.getenv("ProgramFiles(x86)")
    if not program_files:
        print("ERROR: No Program Files (x86) directory found")
        return None
    vswhere_path = os.path.join(program_files, "Microsoft Visual Studio", "Installer", "vswhere.exe")
    if not os.path.exists(vswhere_path):
        print("ERROR: vswhere.exe is not found here, so no Visual Studio installations found: " + vswhere_path)
        return None
    # Query all products as JSON so the output is machine-readable.
    cmd_line = [vswhere_path, "-products", "*", "-format", "json"]
    completed = subprocess.run(cmd_line, capture_output=True)
    version_json = json.loads(completed.stdout.decode())
    last_version = None
    for vs_version in version_json:
        if listall:
            print(vs_version["displayName"])
            last_version = vs_version["installationPath"]
        elif search_str in vs_version["displayName"]:
            return vs_version["installationPath"]
    return last_version
'''
find_msvc_ver will list the first MSVC version found in a Visual Studio installation
vs_install_path = "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community"
returns something like "14.25.28610"
'''
def find_msvc_ver(vs_install_path:str) -> str:
    """Return one MSVC toolset version folder name found under *vs_install_path*.

    Args:
        vs_install_path: Visual Studio install root, e.g.
            "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community".

    Returns:
        The first entry (in sorted order) of VC/Tools/MSVC, e.g. "14.25.28610",
        or None when the folder is missing or empty.
    """
    msvc_folder = os.path.join(vs_install_path, "VC", "Tools", "MSVC")
    if not os.path.exists(msvc_folder):
        print("ERROR: No MSVC folder found at " + msvc_folder)
        return None
    # os.listdir order is filesystem-dependent; sort so repeated runs
    # deterministically pick the same toolset version.
    msvc_vers = sorted(os.listdir(msvc_folder))
    if len(msvc_vers) > 0:
        return msvc_vers[0]
    else:
        print("ERROR: No MSVC folder found at " + msvc_folder)
        return None
def run_command():
    """CLI entry point: find a Visual Studio install and optionally patch a host_deps XML.

    Flags: -v/--visual-studio-version (displayName substring, default "2019"),
    -l/--list-all (just print installs), -d/--host-deps-path (XML file to update).
    Exits with status 1 when no Visual Studio installation is found.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-v',
                        '--visual-studio-version',
                        default='2019',
                        dest='vs_ver',
                        help='Different Visual Studio installation \"displayNames\" will be searched with this substring',
                        required=False)
    parser.add_argument('-l',
                        '--list-all',
                        dest='list_all',
                        action='store_true',
                        help="Enable this to simply just list all Visual Studio installations rather than updating the host_deps file",
                        required=False)
    parser.add_argument('-d',
                        '--host-deps-path',
                        dest='host_deps_path',
                        help="The path to the host_deps.packman.xml file",
                        required=False)
    # parse_known_args: tolerate extra args passed through by wrapper scripts.
    args, _ = parser.parse_known_args()
    if not args.list_all:
        print("Searching for an install of Visual Studio <%s>" % (args.vs_ver))
    vs_path = find_vs(args.vs_ver, args.list_all)
    if not vs_path:
        print("ERROR: No Visual Studio Installation Found")
        exit(1)
    if not args.list_all and vs_path:
        print("VS " + args.vs_ver + " found in: " + vs_path)
    msvc_version = find_msvc_ver(vs_path)
    if msvc_version:
        print("VS " + args.vs_ver + " MSVC ver: " + msvc_version)
    # Only rewrite the deps file when everything needed was found.
    if args.host_deps_path and vs_path and msvc_version:
        update_host_deps(args.host_deps_path, vs_path = vs_path, msvc_ver = msvc_version)
        print("Update host dependencies file: " + args.host_deps_path)
# Run when executed directly; "__mp_main__" covers multiprocessing spawn re-import.
if __name__ == "__main__" or __name__ == "__mp_main__":
    run_command()
| 7,257 | Python | 37 | 130 | 0.637453 |
gazebosim/gz-omni/tools/repoman/clean.py | import os
import sys
import platform
def clean():
    """Delete the generated build-output folders from the current directory."""
    for folder in ('_build', '_compiler', '_builtpackages'):
        if not os.path.exists(folder):
            continue
        print("Removing %s" % folder)
        # Deliberately shell out instead of using shutil.rmtree: on Windows,
        # "rmdir" is safer when the tree may contain junctions.
        if platform.system() == 'Windows':
            os.system("rmdir /q /s %s > nul 2>&1" % folder)
        else:
            os.system("rm -r -f %s > /dev/null 2>&1" % folder)
        # The command output is suppressed, so report lingering folders here
        # (typically caused by a file lock).
        if os.path.exists(folder):
            print("Warning: %s was not successfully removed, most probably due to a file lock on 1 or more of the files." % folder)
# Run when executed directly; "__mp_main__" covers multiprocessing spawn re-import.
if __name__ == "__main__" or __name__ == "__mp_main__":
    clean()
| 812 | Python | 29.11111 | 135 | 0.539409 |
gazebosim/gz-omni/tools/repoman/build.py | import os
import sys
import argparse
import repoman
import packmanapi
# Packman dependency spec: package name -> pinned version and host link path.
DEPS = {
    "nvtools_build": {
        "version": "0.3.2",
        "link_path_host": "nvtools_build",
    }
}
def run_command():
    """Fetch build tooling via packman, assemble arguments, and run the shared build script.

    Accepts -p/--platform-target (defaults to linux-x86_64 when absent); all
    remaining argv entries are forwarded to the downstream build.py.
    """
    platform_host = repoman.api.get_and_validate_host_platform(["windows-x86_64", "linux-x86_64"])
    repo_folders = repoman.api.get_repo_paths()
    repoman.api.fetch_deps(DEPS, platform_host, repo_folders["host_deps"])
    BUILD_SCRIPT = os.path.join(repo_folders["host_deps"], "nvtools_build", "build.py")
    # Fetch the asset dependencies
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--platform-target',
                        dest='platform_target', required=False)
    options, _ = parser.parse_known_args()
    # Checking if platform was passed
    # We cannot use argparse's default, as we also need to set up the command line argument
    # if it wasn't supplied. It is possible to also check for the host platform, if we want to
    # make different default behavior when building on windows.
    if not repoman.api.has_options_arg(options, 'platform_target'):
        options.platform_target = 'linux-x86_64'
        sys.argv.extend(["--platform-target", options.platform_target])
    # We need the host-deps before we can run MSBuild
    packmanapi.pull(os.path.join(repo_folders["root"], repo_folders["host_deps_xml"]), platform=platform_host)
    # Construct arguments for the underlying script
    script_argv = sys.argv[1:]
    script_argv.extend(["--root", repo_folders["root"]])
    script_argv.extend(["--deps-host", repo_folders["host_deps_xml"]])
    script_argv.extend(["--deps-target", repo_folders["target_deps_xml"]])
    if platform_host == "windows-x86_64":
        script_argv.extend(["--premake-tool", os.path.join(repo_folders["host_deps"], "premake", "premake5.exe")])
        # Look for different MSBuild versions (VS2017 uses 15.0, VS2019+ uses Current).
        ms_build_path = ""
        ms_build_locations = [
            r"buildtools\MSBuild\15.0\Bin\MSBuild.exe",
            r"buildtools\MSBuild\Current\Bin\MSBuild.exe",
        ]
        for ms_build_location in ms_build_locations:
            print("Checking if MSBuild.exe located here: " + os.path.join(repo_folders["host_deps"], ms_build_location))
            if os.path.exists(os.path.join(repo_folders["host_deps"], ms_build_location)):
                ms_build_path = os.path.join(repo_folders["host_deps"], ms_build_location)
                break
        print("Building using this MSBuild: " + ms_build_path)
        script_argv.extend(["--msbuild-tool", ms_build_path])
        script_argv.extend(["--vs-version", "vs2019"])
        script_argv.extend(["--sln", os.path.join(repo_folders["compiler"], r"vs2019\Samples.sln")])
    elif platform_host == "linux-x86_64":
        script_argv.extend(["--premake-tool", os.path.join(repo_folders["host_deps"], "premake", "premake5")])
    # Execute module script and set globals
    repoman.api.run_script_with_custom_args(BUILD_SCRIPT, script_argv)
# Run when executed directly; "__mp_main__" covers multiprocessing spawn re-import.
if __name__ == "__main__" or __name__ == "__mp_main__":
    run_command()
| 3,071 | Python | 41.666666 | 120 | 0.645718 |
gazebosim/gz-omni/tools/repoman/filecopy.py | import os
import sys
import argparse
import packmanapi
import repoman
# Packman dependency spec: package name -> pinned version and host link path.
DEPS = {
    "nvfilecopy": {
        "version": "1.4",
        "link_path_host": "nvtools_nvfilecopy",
    }
}
# Entry point: fetch the nvfilecopy tool, validate the requested target
# platform, and process the JSON copy-spec given as the last CLI argument.
if __name__ == "__main__" or __name__ == "__mp_main__":
    repo_folders = repoman.api.get_repo_paths()
    deps_folders = repoman.api.fetch_deps(DEPS, None, repo_folders["host_deps"])
    # nvfilecopy becomes importable only after its folder is on sys.path.
    sys.path.append(deps_folders["nvfilecopy"])
    import nvfilecopy
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--platform-target', dest='platform_target', required=True)
    options, _ = parser.parse_known_args()
    platform_target = repoman.api.validate_platform(
        "target",
        options.platform_target,
        ["windows-x86_64", "linux-x86_64", "linux-aarch64"]
    )
    # The copy-spec JSON path is expected as the final positional argument.
    nvfilecopy.process_json_file(sys.argv[len(sys.argv) - 1], platform_target)
| 863 | Python | 25.999999 | 89 | 0.636153 |
NVlabs/ACID/PlushSim/scripts/python_app.py | #!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import carb
import omni.kit.app
import omni.kit
import os
import sys
import time
import asyncio
import argparse
# Default OmniKit launch/render settings; OmniKitHelper overlays the caller's
# config dict on top of these at construction time.
DEFAULT_CONFIG = {
    "width": 1024,
    "height": 800,
    "renderer": "PathTracing",  # Can also be RayTracedLighting
    "anti_aliasing": 3,  # 3 for dlss, 2 for fxaa, 1 for taa, 0 to disable aa
    "samples_per_pixel_per_frame": 64,
    "denoiser": True,
    "subdiv_refinement_level": 0,
    "headless": True,
    "max_bounces": 4,
    "max_specular_transmission_bounces": 6,
    "max_volume_bounces": 4,
    "sync_loads": False,
    "experience": f'{os.environ["EXP_PATH"]}/omni.bloky.python.kit',
}
class OmniKitHelper:
"""Helper class for launching OmniKit from a Python environment.
Launches and configures OmniKit and exposes useful functions.
Typical usage example:
.. highlight:: python
.. code-block:: python
config = {'width': 800, 'height': 600, 'renderer': 'PathTracing'}
kit = OmniKitHelper(config) # Start omniverse kit
# <Code to generate or load a scene>
kit.update() # Render a single frame"""
def __init__(self, config=DEFAULT_CONFIG):
"""The config variable is a dictionary containing the following entries
Args:
width (int): Width of the viewport and generated images. Defaults to 1024
height (int): Height of the viewport and generated images. Defaults to 800
renderer (str): Rendering mode, can be `RayTracedLighting` or `PathTracing`. Defaults to `PathTracing`
samples_per_pixel_per_frame (int): The number of samples to render per frame, used for `PathTracing` only. Defaults to 64
denoiser (bool): Enable this to use AI denoising to improve image quality. Defaults to True
subdiv_refinement_level (int): Number of subdivisons to perform on supported geometry. Defaults to 0
headless (bool): Disable UI when running. Defaults to True
max_bounces (int): Maximum number of bounces, used for `PathTracing` only. Defaults to 4
max_specular_transmission_bounces(int): Maximum number of bounces for specular or transmission, used for `PathTracing` only. Defaults to 6
max_volume_bounces(int): Maximum number of bounces for volumetric, used for `PathTracing` only. Defaults to 4
sync_loads (bool): When enabled, will pause rendering until all assets are loaded. Defaults to False
experience (str): The config json used to launch the application.
"""
# only import custom loop runner if we create this object
# from omni.kit.loop import _loop
# initialize vars
self._exiting = False
self._is_dirty_instance_mappings = True
self._previous_physics_dt = 1.0 / 60.0
self.config = DEFAULT_CONFIG
if config is not None:
self.config.update(config)
# Load app plugin
self._framework = carb.get_framework()
print(os.environ["CARB_APP_PATH"])
self._framework.load_plugins(
loaded_file_wildcards=["omni.kit.app.plugin"],
search_paths=[os.path.abspath(f'{os.environ["CARB_APP_PATH"]}/kit/plugins')],
)
print(DEFAULT_CONFIG)
# launch kit
self.last_update_t = time.time()
self.app = omni.kit.app.get_app()
self.kit_settings = None
self._start_app()
self.carb_settings = carb.settings.acquire_settings_interface()
self.setup_renderer(mode="default") # set rtx-defaults settings
self.setup_renderer(mode="non-default") # set rtx settings
self.timeline = omni.timeline.get_timeline_interface()
# Wait for new stage to open
new_stage_task = asyncio.ensure_future(omni.usd.get_context().new_stage_async())
print("OmniKitHelper Starting up ...")
while not new_stage_task.done():
time.sleep(0.001) # This sleep prevents a deadlock in certain cases
self.update()
self.update()
# Dock windows if they exist
main_dockspace = omni.ui.Workspace.get_window("DockSpace")
def dock_window(space, name, location):
window = omni.ui.Workspace.get_window(name)
if window and space:
window.dock_in(space, location)
return window
view = dock_window(main_dockspace, "Viewport", omni.ui.DockPosition.TOP)
self.update()
console = dock_window(view, "Console", omni.ui.DockPosition.BOTTOM)
prop = dock_window(view, "Property", omni.ui.DockPosition.RIGHT)
dock_window(view, "Main ToolBar", omni.ui.DockPosition.LEFT)
self.update()
dock_window(prop, "Render Settings", omni.ui.DockPosition.SAME)
self.update()
print("OmniKitHelper Startup Complete")
def _start_app(self):
args = [
os.path.abspath(__file__),
f'{self.config["experience"]}',
"--/persistent/app/viewport/displayOptions=0", # hide extra stuff in viewport
# Forces kit to not render until all USD files are loaded
f'--/rtx/materialDb/syncLoads={self.config["sync_loads"]}',
f'--/rtx/hydra/materialSyncLoads={self.config["sync_loads"]}'
f'--/omni.kit.plugin/syncUsdLoads={self.config["sync_loads"]}',
"--/app/content/emptyStageOnStart=False", # This is required due to a infinite loop but results in errors on launch
"--/app/hydraEngine/waitIdle=True",
"--/app/asyncRendering=False",
f'--/app/renderer/resolution/width={self.config["width"]}',
f'--/app/renderer/resolution/height={self.config["height"]}',
]
args.append(f"--portable")
args.append(f"--no-window")
args.append(f"--allow-root")
print(args)
self.app.startup("kit", f'{os.environ["CARB_APP_PATH"]}/kit', args)
def __del__(self):
if self._exiting is False and sys.meta_path is None:
print(
"\033[91m"
+ "ERROR: Python exiting while OmniKitHelper was still running, Please call shutdown() on the OmniKitHelper object to exit cleanly"
+ "\033[0m"
)
def shutdown(self):
self._exiting = True
print("Shutting Down OmniKitHelper...")
# We are exisitng but something is still loading, wait for it to load to avoid a deadlock
if self.is_loading():
print(" Waiting for USD resource operations to complete (this may take a few seconds)")
while self.is_loading():
self.app.update()
self.app.shutdown()
self._framework.unload_all_plugins()
print("Shutting Down Complete")
def get_stage(self):
"""Returns the current USD stage."""
return omni.usd.get_context().get_stage()
def set_setting(self, setting, value):
"""Convenience function to set settings.
Args:
setting (str): string representing the setting being changed
value: new value for the setting being changed, the type of this value must match its repsective setting
"""
if isinstance(value, str):
self.carb_settings.set_string(setting, value)
elif isinstance(value, bool):
self.carb_settings.set_bool(setting, value)
elif isinstance(value, int):
self.carb_settings.set_int(setting, value)
elif isinstance(value, float):
self.carb_settings.set_float(setting, value)
else:
raise ValueError(f"Value of type {type(value)} is not supported.")
def set_physics_dt(self, physics_dt: float = 1.0 / 150.0, physics_substeps: int = 1):
"""Specify the physics step size to use when simulating, default is 1/60.
Note that a physics scene has to be in the stage for this to do anything
Args:
physics_dt (float): Use this value for physics step
"""
if self.get_stage() is None:
return
if physics_dt == self._previous_physics_dt:
return
if physics_substeps is None or physics_substeps <= 1:
physics_substeps = 1
self._previous_physics_dt = physics_dt
from pxr import UsdPhysics, PhysxSchema
steps_per_second = int(1.0 / physics_dt)
min_steps = int(steps_per_second / physics_substeps)
physxSceneAPI = None
for prim in self.get_stage().Traverse():
if prim.IsA(UsdPhysics.Scene):
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Apply(prim)
if physxSceneAPI is not None:
physxSceneAPI.GetTimeStepsPerSecondAttr().Set(steps_per_second)
settings = carb.settings.get_settings()
settings.set_int("persistent/simulation/minFrameRate", min_steps)
def update(self, dt=0.0, physics_dt=None, physics_substeps=None):
"""Render one frame. Optionally specify dt in seconds, specify None to use wallclock.
Specify physics_dt and physics_substeps to decouple the physics step size from rendering
For example: to render with a dt of 1/30 and simulate physics at 1/120 use:
- dt = 1/30.0
- physics_dt = 1/120.0
- physics_substeps = 4
Args:
dt (float): The step size used for the overall update, set to None to use wallclock
physics_dt (float, optional): If specified use this value for physics step
physics_substeps (int, optional): Maximum number of physics substeps to perform
"""
# dont update if exit was called
if self._exiting:
return
# a physics dt was specified and is > 0
if physics_dt is not None and physics_dt > 0.0:
self.set_physics_dt(physics_dt, physics_substeps)
# a dt was specified and is > 0
if dt is not None and dt > 0.0:
# if physics dt was not specified, use rendering dt
if physics_dt is None:
self.set_physics_dt(dt)
# self.loop_runner.set_runner_dt(dt)
self.app.update()
else:
# dt not specified, run in realtime
time_now = time.time()
dt = time_now - self.last_update_t
if physics_dt is None:
self.set_physics_dt(1.0 / 60.0, 4)
self.last_update_t = time_now
# self.loop_runner.set_runner_dt(dt)
self.app.update()
def play(self):
"""Starts the editor physics simulation"""
self.update()
self.timeline.play()
self.update()
def pause(self):
"""Pauses the editor physics simulation"""
self.update()
self.timeline.pause()
self.update()
def stop(self):
"""Stops the editor physics simulation"""
self.update()
self.timeline.stop()
self.update()
def get_status(self):
"""Get the status of the renderer to see if anything is loading"""
return omni.usd.get_context().get_stage_loading_status()
def is_loading(self):
"""convenience function to see if any files are being loaded
Returns:
bool: True if loading, False otherwise
"""
message, loaded, loading = self.get_status()
return loading > 0
def is_exiting(self):
"""get current exit status for this object
Returns:
bool: True if exit() was called previously, False otherwise
"""
return self._exiting
def execute(self, *args, **kwargs):
"""Allow use of omni.kit.commands interface"""
omni.kit.commands.execute(*args, **kwargs)
def setup_renderer(self, mode="non-default"):
rtx_mode = "/rtx-defaults" if mode == "default" else "/rtx"
"""Reset render settings to those in config. This should be used in case a new stage is opened and the desired config needs to be re-applied"""
self.set_setting(rtx_mode + "/rendermode", self.config["renderer"])
# Raytrace mode settings
self.set_setting(rtx_mode + "/post/aa/op", self.config["anti_aliasing"])
self.set_setting(rtx_mode + "/directLighting/sampledLighting/enabled", True)
# self.set_setting(rtx_mode + "/ambientOcclusion/enabled", True)
# Pathtrace mode settings
self.set_setting(rtx_mode + "/pathtracing/spp", self.config["samples_per_pixel_per_frame"])
self.set_setting(rtx_mode + "/pathtracing/totalSpp", self.config["samples_per_pixel_per_frame"])
self.set_setting(rtx_mode + "/pathtracing/clampSpp", self.config["samples_per_pixel_per_frame"])
self.set_setting(rtx_mode + "/pathtracing/maxBounces", self.config["max_bounces"])
self.set_setting(
rtx_mode + "/pathtracing/maxSpecularAndTransmissionBounces",
self.config["max_specular_transmission_bounces"],
)
self.set_setting(rtx_mode + "/pathtracing/maxVolumeBounces", self.config["max_volume_bounces"])
self.set_setting(rtx_mode + "/pathtracing/optixDenoiser/enabled", self.config["denoiser"])
self.set_setting(rtx_mode + "/hydra/subdivision/refinementLevel", self.config["subdiv_refinement_level"])
# Experimental, forces kit to not render until all USD files are loaded
self.set_setting(rtx_mode + "/materialDb/syncLoads", self.config["sync_loads"])
self.set_setting(rtx_mode + "/hydra/materialSyncLoads", self.config["sync_loads"])
self.set_setting("/omni.kit.plugin/syncUsdLoads", self.config["sync_loads"])
def create_prim(
self, path, prim_type, translation=None, rotation=None, scale=None, ref=None, semantic_label=None, attributes={}
):
"""Create a prim, apply specified transforms, apply semantic label and
set specified attributes.
args:
path (str): The path of the new prim.
prim_type (str): Prim type name
translation (tuple(float, float, float), optional): prim translation (applied last)
rotation (tuple(float, float, float), optional): prim rotation in radians with rotation
order ZYX.
scale (tuple(float, float, float), optional): scaling factor in x, y, z.
ref (str, optional): Path to the USD that this prim will reference.
semantic_label (str, optional): Semantic label.
attributes (dict, optional): Key-value pairs of prim attributes to set.
"""
from pxr import UsdGeom, Semantics
prim = self.get_stage().DefinePrim(path, prim_type)
for k, v in attributes.items():
prim.GetAttribute(k).Set(v)
xform_api = UsdGeom.XformCommonAPI(prim)
if ref:
prim.GetReferences().AddReference(ref)
if semantic_label:
sem = Semantics.SemanticsAPI.Apply(prim, "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set(semantic_label)
if rotation:
xform_api.SetRotate(rotation, UsdGeom.XformCommonAPI.RotationOrderXYZ)
if scale:
xform_api.SetScale(scale)
if translation:
xform_api.SetTranslate(translation)
return prim
    def set_up_axis(self, axis):
        """Change the up axis of the current stage
        Args:
            axis: valid values are `UsdGeom.Tokens.y`, or `UsdGeom.Tokens.z`
        """
        from pxr import UsdGeom, Usd
        stage = self.get_stage()
        rootLayer = stage.GetRootLayer()
        # The root layer may be read-only; explicitly allow edits before
        # opening the edit context on it.
        rootLayer.SetPermissionToEdit(True)
        with Usd.EditContext(stage, rootLayer):
            UsdGeom.SetStageUpAxis(stage, axis)
| 16,266 | Python | 42.034391 | 151 | 0.624185 |
NVlabs/ACID/PlushSim/scripts/data_gen_attic.py | #!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import time
import argparse
import json
from utils import *
# Command-line configuration for the dataset-generation run.
parser = argparse.ArgumentParser("Dataset generation")
################################################################
# save to args
parser.add_argument("--save_dir", type=str, default="/result/interaction_sequence")
parser.add_argument("--img_subdir", type=str, default='img')
parser.add_argument("--geom_subdir", type=str, default='geom')
parser.add_argument("--info_subdir", type=str, default='info')
parser.add_argument("--save_every", type=int, default=25)
################################################################
# interaction args
parser.add_argument("--num_interaction", type=int, default=18)
parser.add_argument("--reset_every", type=int, default=6)
################################################################
# scene args
parser.add_argument("--asset_root", type=str, default="/result/assets")
parser.add_argument("--scene_path", type=str, default="attic_lean/Attic_clean_v2.usda")
parser.add_argument("--plush_path", type=str, default="animals/teddy/teddy_scaled/teddy_scaled.usda")
parser.add_argument("--skip_layout_randomization", action="store_true", default=False)
parser.add_argument("--skip_lights_randomization", action="store_true", default=False)
args = parser.parse_args()
# Create the output directory tree (idempotent) and cache the subdir paths
# used throughout main().
os.makedirs(args.save_dir, exist_ok=True)
os.makedirs(os.path.join(args.save_dir, args.img_subdir), exist_ok=True)
os.makedirs(os.path.join(args.save_dir, args.geom_subdir), exist_ok=True)
os.makedirs(os.path.join(args.save_dir, args.info_subdir), exist_ok=True)
img_dir = os.path.join(args.save_dir, args.img_subdir)
geom_dir = os.path.join(args.save_dir, args.geom_subdir)
info_dir = os.path.join(args.save_dir, args.info_subdir)
def main():
    """Generate an interaction dataset in the attic scene.

    Builds the scene, then for each reset: randomizes the layout, performs up
    to --reset_every grasp-move-release interactions with the plush, and
    saves RGB/geometry frames plus per-reset metadata under --save_dir.
    """
    from attic_scene import attic_scene
    scene_path = os.path.join(args.asset_root, args.scene_path)
    plush_path = os.path.join(args.asset_root, args.plush_path)
    scene = attic_scene(
        scene_path,
        plush_path,
        RESET_STATIC=True,
        RAND_LAYOUT=not args.skip_layout_randomization,
        RAND_LIGHTS=not args.skip_lights_randomization,)
    start_time = time.time()
    # save scene overall info
    with open(os.path.join(info_dir, "scene_meta.json"), 'w') as fp:
        json.dump(scene.get_scene_metadata(), fp)
    # number of resets (ceiling division of interactions over reset_every)
    num_resets = (args.num_interaction + args.reset_every - 1) // args.reset_every
    for reset in range(num_resets):
        # save scene reset collider info
        np.savez_compressed(os.path.join(info_dir, f"clutter_info_{reset:04d}.npz"), **scene.get_scene_background_state())
        num_steps = min(args.num_interaction, (reset + 1) * args.reset_every) - reset * args.reset_every
        # sample interactions; per-interaction bookkeeping accumulated here
        actions = {
            'grasp_points':[],
            'target_points':[],
            'grasp_pixels':[],
            'start_frames':[],
            'release_frames':[],
            'static_frames':[], }
        # save start frame
        save_frame(f"{reset:04d}_{scene.frame:06d}", scene.get_observations(), img_dir)
        np.savez_compressed(
            os.path.join(geom_dir, f"{reset:04d}_{scene.frame:06d}.npz"),
            **scene.get_scene_state_plush(convert_to=np.float16))
        for interaction in range(num_steps):
            # stop simulating while we sample the next action
            scene.kit.pause()
            action = scene.sample_action()
            if action is None:
                scene.kit.play()
                continue
            grasp_point, target_point, grasp_pixel = action
            actions['grasp_points'].append(np.array(grasp_point,np.float16))
            actions['target_points'].append(np.array(target_point,np.float16))
            actions['grasp_pixels'].append(np.array(grasp_pixel,np.uint16))
            actions['start_frames'].append(np.array(scene.frame,np.uint16))
            save_frame(f"{reset:04d}_{scene.frame:06d}", scene.get_observations(), img_dir)
            np.savez_compressed(
                os.path.join(geom_dir, f"{reset:04d}_{scene.frame:06d}.npz"),
                **scene.get_scene_state_plush(convert_to=np.float16))
            scene.kit.play()
            # Phase 1: move the gripper from its rest pose to the grasp point
            init_traj = scene.gripper.plan_trajectory(scene.gripper.eef_default_loc, grasp_point)
            # move
            for pos in init_traj:
                scene.step()
                scene.gripper.set_translation(tuple(pos))
                if scene.frame % args.save_every == args.save_every - 1:
                    save_frame(f"{reset:04d}_{scene.frame:06d}", scene.get_observations(), img_dir)
                    np.savez_compressed(
                        os.path.join(geom_dir, f"{reset:04d}_{scene.frame:06d}.npz"),
                        **scene.get_scene_state_plush(convert_to=np.float16))
            # Phase 2: attach the plush and drag it toward the target point
            scene.kit.pause()
            #init_move_traj = scene.gripper.set_translation(grasp_point)
            scene.gripper.grasp(scene.plush)
            scene.kit.play()
            traj = scene.gripper.plan_trajectory(grasp_point, target_point)
            # move
            for pos in traj:
                scene.step()
                scene.gripper.set_translation(tuple(pos))
                if scene.frame % args.save_every == args.save_every - 1:
                    save_frame(f"{reset:04d}_{scene.frame:06d}", scene.get_observations(), img_dir)
                    np.savez_compressed(
                        os.path.join(geom_dir, f"{reset:04d}_{scene.frame:06d}.npz"),
                        **scene.get_scene_state_plush(convert_to=np.float16))
            # wait until stable
            for ff in range(scene.FALL_MAX):
                scene.step()
                if scene.check_scene_static():
                    print(f"grasp reaching a resting state after {ff} steps")
                    break
            save_frame(f"{reset:04d}_{scene.frame:06d}", scene.get_observations(), img_dir)
            np.savez_compressed(
                os.path.join(geom_dir, f"{reset:04d}_{scene.frame:06d}.npz"),
                **scene.get_scene_state_plush(convert_to=np.float16))
            actions['release_frames'].append(np.array(scene.frame,np.uint16))
            # release: ungrasp, then let the plush fall and settle
            scene.kit.pause()
            scene.gripper.ungrasp()
            # TODO: delete gripper collider
            scene.kit.play()
            for ff in range(scene.FALL_MAX+scene.DROP_MIN):
                scene.step()
                if scene.frame % args.save_every == args.save_every - 1:
                    save_frame(f"{reset:04d}_{scene.frame:06d}", scene.get_observations(), img_dir)
                    np.savez_compressed(
                        os.path.join(geom_dir, f"{reset:04d}_{scene.frame:06d}.npz"),
                        **scene.get_scene_state_plush(convert_to=np.float16))
                # enforce a minimum number of free-fall steps before the
                # static check may terminate the loop
                if ff < scene.DROP_MIN:
                    continue
                if scene.check_scene_static():
                    print(f"release reaching a resting state after {ff} steps")
                    break
            scene.gripper.reset_translation()
            save_frame(f"{reset:04d}_{scene.frame:06d}", scene.get_observations(), img_dir)
            np.savez_compressed(
                os.path.join(geom_dir, f"{reset:04d}_{scene.frame:06d}.npz"),
                **scene.get_scene_state_plush(convert_to=np.float16))
            actions['static_frames'].append(np.array(scene.frame,np.uint16))
        np.savez_compressed(os.path.join(info_dir, f"interaction_info_{reset:04d}.npz"), **actions)
        end_time = time.time()
        from datetime import timedelta
        time_str = str(timedelta(seconds=end_time - start_time))
        print(f'Sampling {num_steps} interactions takes: {time_str}')
        scene.reset()
    # cleanup
    scene.kit.shutdown()
# Script entry point: generate the interaction dataset when run directly.
if __name__ == "__main__":
    main()
| 8,282 | Python | 43.05851 | 122 | 0.588747 |
NVlabs/ACID/PlushSim/scripts/syntheticdata.py | #!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Helper class for obtaining groundtruth data from OmniKit.
Support provided for RGB, Depth, Bounding Box (2D Tight, 2D Loose, 3D),
segmentation (instance and semantic), and camera parameters.
Typical usage example:
kit = OmniKitHelper() # Start omniverse kit
sd_helper = SyntheticDataHelper()
gt = sd_helper.get_groundtruth(('rgb', 'depth', 'boundingBox2DTight'))
"""
import math
import carb
import omni
import time
from pxr import UsdGeom, Semantics, Gf
import numpy as np
class SyntheticDataHelper:
    """Convenience wrapper around omni.syntheticdata sensor queries.

    Lazily creates/initializes viewport sensors and exposes uniform getters
    for RGB, depth, segmentation, bounding boxes, camera parameters and
    object poses via :meth:`get_groundtruth`.
    """

    def __init__(self):
        self.app = omni.kit.app.get_app_interface()
        ext_manager = self.app.get_extension_manager()
        ext_manager.set_extension_enabled("omni.syntheticdata", True)
        from omni.syntheticdata import sensors, helpers
        import omni.syntheticdata._syntheticdata as sd  # Must be imported after getting app interface

        self.sd = sd
        self.sd_interface = self.sd.acquire_syntheticdata_interface()
        self.viewport = omni.kit.viewport.get_viewport_interface()
        self.carb_settings = carb.settings.acquire_settings_interface()
        self.sensor_helper_lib = sensors
        self.generic_helper_lib = helpers
        mode = "numpy"  # NOTE(review): never read afterwards — looks vestigial
        # Dispatch table: sensor name -> callable(viewport) returning its data.
        self.sensor_helpers = {
            "rgb": sensors.get_rgb,
            "depth": sensors.get_depth_linear,
            "depthLinear": self.get_depth_linear,
            "instanceSegmentation": sensors.get_instance_segmentation,
            "semanticSegmentation": self.get_semantic_segmentation,
            "boundingBox2DTight": sensors.get_bounding_box_2d_tight,
            "boundingBox2DLoose": sensors.get_bounding_box_2d_loose,
            "boundingBox3D": sensors.get_bounding_box_3d,
            "camera": self.get_camera_params,
            "pose": self.get_pose,
        }
        # Sensor name -> syntheticdata sensor type (for creation/initialization).
        self.sensor_types = {
            "rgb": self.sd.SensorType.Rgb,
            "depth": self.sd.SensorType.DepthLinear,
            "depthLinear": self.sd.SensorType.DepthLinear,
            "instanceSegmentation": self.sd.SensorType.InstanceSegmentation,
            "semanticSegmentation": self.sd.SensorType.SemanticSegmentation,
            "boundingBox2DTight": self.sd.SensorType.BoundingBox2DTight,
            "boundingBox2DLoose": self.sd.SensorType.BoundingBox2DLoose,
            "boundingBox3D": self.sd.SensorType.BoundingBox3D,
        }
        self.sensor_state = {s: False for s in list(self.sensor_helpers.keys())}

    def get_depth_linear(self, viewport):
        """ Get Depth Linear sensor output.
        Args:
            viewport (omni.kit.viewport._viewport.IViewportWindow): Viewport from which to retrieve/create sensor.
        Return:
            (numpy.ndarray): A float32 array of shape (height, width, 1).
        """
        sensor = self.sensor_helper_lib.create_or_retrieve_sensor(viewport, self.sd.SensorType.DepthLinear)
        data = self.sd_interface.get_sensor_host_float_texture_array(sensor)
        h, w = data.shape[:2]
        return np.frombuffer(data, np.float32).reshape(h, w, -1)

    def get_semantic_segmentation(self, viewport):
        """Return a per-pixel mask collapsing instance ids to {0, 1}."""
        instance_data, instance_mappings = self.sensor_helpers['instanceSegmentation'](viewport, return_mapping=True)
        # Build an instance-id -> label lookup; every mapped instance gets 1.
        ins_to_sem = np.zeros(np.max(instance_data)+1,dtype=np.uint8)
        for im in instance_mappings[::-1]:
            for i in im["instanceIds"]:
                if i >= len(ins_to_sem):
                    continue
                ins_to_sem[i] = 1 #if im['semanticLabel'] == 'teddy' else 2
        return np.take(ins_to_sem, instance_data)

    def get_camera_params(self, viewport):
        """Get active camera intrinsic and extrinsic parameters.
        Returns:
            A dict of the active camera's parameters.
            pose (numpy.ndarray): camera position in world coordinates,
            fov (float): horizontal field of view in radians
            focal_length (float)
            horizontal_aperture (float)
            view_projection_matrix (numpy.ndarray(dtype=float64, shape=(4, 4)))
            resolution (dict): resolution as a dict with 'width' and 'height'.
            clipping_range (tuple(float, float)): Near and Far clipping values.
        """
        stage = omni.usd.get_context().get_stage()
        prim = stage.GetPrimAtPath(viewport.get_active_camera())
        prim_tf = UsdGeom.Camera(prim).GetLocalTransformation()
        focal_length = prim.GetAttribute("focalLength").Get()
        horiz_aperture = prim.GetAttribute("horizontalAperture").Get()
        fov = 2 * math.atan(horiz_aperture / (2 * focal_length))
        x_min, y_min, x_max, y_max = viewport.get_viewport_rect()
        width, height = x_max - x_min, y_max - y_min
        aspect_ratio = width / height
        near, far = prim.GetAttribute("clippingRange").Get()
        view_proj_mat = self.generic_helper_lib.get_view_proj_mat(prim, aspect_ratio, near, far)
        return {
            "pose": np.array(prim_tf),
            "fov": fov,
            "focal_length": focal_length,
            "horizontal_aperture": horiz_aperture,
            "view_projection_matrix": view_proj_mat,
            "resolution": {"width": width, "height": height},
            "clipping_range": (near, far),
        }

    def get_pose(self, viewport=None):
        """Get pose of all objects with a semantic label.

        Args:
            viewport: accepted (and ignored) so this helper matches the
                uniform ``helper(viewport)`` dispatch in get_groundtruth().
                FIX: previously the signature took no argument, so requesting
                the "pose" sensor raised a TypeError at the dispatch site.
        """
        stage = omni.usd.get_context().get_stage()
        mappings = self.generic_helper_lib.get_instance_mappings()
        pose = []
        for m in mappings:
            prim_path = m[0]
            prim = stage.GetPrimAtPath(prim_path)
            prim_tf = UsdGeom.Xformable(prim).ComputeLocalToWorldTransform(0.0)
            pose.append((str(prim_path), m[1], str(m[2]), np.array(prim_tf)))
        return pose

    async def initialize_async(self, viewport, sensor_types, timeout=10):
        """ Initialize sensors in the list provided.
        Args:
            viewport (omni.kit.viewport._viewport.IViewportWindow): Viewport from which to retrieve/create sensor.
            sensor_types (list of omni.syntheticdata._syntheticdata.SensorType): List of sensor types to initialize.
            timeout (int): Maximum time in seconds to attempt to initialize sensors.
        """
        start = time.time()
        is_initialized = False
        while not is_initialized and time.time() < (start + timeout):
            sensors = []
            for sensor_type in sensor_types:
                sensors.append(self.sensor_helper_lib.create_or_retrieve_sensor(viewport, sensor_type))
            await omni.kit.app.get_app_interface().next_update_async()
            is_initialized = not any([not self.sd_interface.is_sensor_initialized(s) for s in sensors])
        if not is_initialized:
            unititialized = [s for s in sensors if not self.sd_interface.is_sensor_initialized(s)]
            raise TimeoutError(f"Unable to initialized sensors: [{unititialized}] within {timeout} seconds.")
        await omni.kit.app.get_app_interface().next_update_async()  # Extra frame required to prevent access violation error

    def get_groundtruth(self, gt_sensors, viewport, verify_sensor_init=True):
        """Get groundtruth from specified gt_sensors.
        Args:
            gt_sensors (list): List of strings of sensor names. Valid sensors names: rgb, depth,
                instanceSegmentation, semanticSegmentation, boundingBox2DTight,
                boundingBox2DLoose, boundingBox3D, camera
            viewport (omni.kit.viewport._viewport.IViewportWindow): Viewport from which to retrieve/create sensor.
            verify_sensor_init (bool): Additional check to verify creation and initialization of sensors.
        Returns:
            Dict of sensor outputs
        """
        if isinstance(gt_sensors, str):
            gt_sensors = (gt_sensors,)
        # Create and initialize sensors; poll until every requested sensor
        # reports itself initialized (rendering frames between attempts).
        while verify_sensor_init:
            flag = 0
            # Render frame
            self.app.update()
            for sensor_name in gt_sensors:
                if sensor_name != "camera" and sensor_name != "pose":
                    current_sensor = self.sensor_helper_lib.create_or_retrieve_sensor(
                        viewport, self.sensor_types[sensor_name]
                    )
                    if not self.sd_interface.is_sensor_initialized(current_sensor):
                        flag = 1
            # Render frame
            self.app.update()
            self.app.update()
            if flag == 0:
                break
        gt = {}
        sensor_state = {}
        # Process non-RT-only sensors
        for sensor in gt_sensors:
            if sensor not in ["camera", "pose"]:
                if sensor == "instanceSegmentation":
                    gt[sensor] = self.sensor_helpers[sensor](viewport, parsed=True, return_mapping=True)
                elif sensor == "boundingBox3D":
                    gt[sensor] = self.sensor_helpers[sensor](viewport, parsed=True, return_corners=True)
                else:
                    gt[sensor] = self.sensor_helpers[sensor](viewport)
                current_sensor = self.sensor_helper_lib.create_or_retrieve_sensor(viewport, self.sensor_types[sensor])
                current_sensor_state = self.sd_interface.is_sensor_initialized(current_sensor)
                sensor_state[sensor] = current_sensor_state
            else:
                gt[sensor] = self.sensor_helpers[sensor](viewport)
        gt["state"] = sensor_state
        return gt
| 9,968 | Python | 42.532751 | 124 | 0.623596 |
NVlabs/ACID/PlushSim/scripts/attic_scene.py | import os
import cv2
import time
import random
import asyncio
import numpy as np
from python_app import OmniKitHelper
import omni
import carb
from utils import *
# Square render resolution used for both the viewport and all saved frames.
RESOLUTION=720
# specify a custom config
CUSTOM_CONFIG = {
    "width": RESOLUTION,
    "height": RESOLUTION,
    "anti_aliasing": 3, # 3 for dlss, 2 for fxaa, 1 for taa, 0 to disable aa
    "renderer": "RayTracedLighting",
    "samples_per_pixel_per_frame": 128,
    "max_bounces": 10,
    "max_specular_transmission_bounces": 6,
    "max_volume_bounces": 4,
    "subdiv_refinement_level": 2,
    "headless": True,
    "sync_loads": True,
    "experience": f'{os.environ["EXP_PATH"]}/omni.bloky.kit',
}
"""
plush animal material: /Root/physics/stuff_animal
magic gripper: /Root/physics/magic_gripper
real object group: /Root/physics/real_objects
magic object group: /Root/physics/magic_objects
"""
class attic_scene(object):
    def __init__(self,
            SCENE_PATH,
            PLUSH_ANIMAL_PATH,
            PLUSH_SCALE=4,
            FALL_MAX=300,
            REST_THRESHOLD=8,
            PHYSX_DT=1/150.,
            SAVE_EVERY=25,
            DROP_MIN=20,
            RESET_STATIC=True,
            RAND_LAYOUT=True,
            RAND_LIGHTS=True,
            ROBOT_SPEED=1.):
        """Build the attic scene: boot OmniKit, load the stage, import the
        plush soft body, set up cameras/gripper, then reset().

        Args mirror the attribute names (they are auto-assigned below).
        """
        # Auto-assign every constructor argument as an instance attribute.
        for k,v in locals().items():
            if k != 'self':
                self.__dict__[k] = v
        # Fixed prim paths inside the authored stage.
        self.plush_animal_mat = "/Root/physics/stuff_animal"
        self.magic_gripper = "/Root/physics/magic_gripper"
        self.fingerL = "/Root/physics/magic_gripper/fingerL"
        self.fingerR = "/Root/physics/magic_gripper/fingerR"
        self.real_object_group = "/Root/physics/real_objects"
        self.magic_object_group = "/Root/physics/magic_objects"
        self.front_path = "/Root/scene_front"
        self.back_path = "/Root/scene_back"
        # Workspace extents (world units); drop_range scales with the plush.
        self.scene_range = np.array([[-50*12,-50*8,0],[50*12,50*8,50*8]])
        self.drop_range = np.array([[-50*self.PLUSH_SCALE,-50*self.PLUSH_SCALE,],
                                    [50*self.PLUSH_SCALE,50*self.PLUSH_SCALE,]]) #/ 2.
        self.back_clutter_range = np.array([[-50*12,50*8,],[50*12,50*12,]])
        self.total_range = np.array([[-50*12,-50*12,0],[50*12,50*12,50*8]])
        self.kit = OmniKitHelper(CUSTOM_CONFIG)
        self.kit.set_physics_dt(physics_dt=self.PHYSX_DT)
        physx_interface = omni.physx.get_physx_interface()
        physx_interface.force_load_physics_from_usd()
        physx_interface.reset_simulation()
        # Open the stage asynchronously; pump the kit until the task finishes.
        async def load_stage(path):
            await omni.usd.get_context().open_stage_async(path)
        setup_task = asyncio.ensure_future(load_stage(SCENE_PATH))
        while not setup_task.done():
            self.kit.update()
        self.kit.setup_renderer()
        self.kit.update()
        self.stage = omni.usd.get_context().get_stage()
        self.front_group = self.stage.GetPrimAtPath(self.front_path)
        self.back_group = self.stage.GetPrimAtPath(self.back_path)
        from syntheticdata import SyntheticDataHelper
        self.sd_helper = SyntheticDataHelper()
        # force RayTracedLighting mode for better performance while simulating physics
        self.kit.set_setting("/rtx/rendermode", "RayTracedLighting")
        # wait until all materials are loaded
        print("waiting for things to load...")
        # if self.kit.is_loading():
        #     time.sleep(10)
        while self.kit.is_loading():
            time.sleep(0.1)
        # set up cameras
        self._setup_cameras()
        _viewport_api = omni.kit.viewport.get_viewport_interface()
        viewport = _viewport_api.get_instance_list()[0]
        self._viewport = _viewport_api.get_viewport_window(viewport)
        # touch the sensors to kick in anti-aliasing
        for _ in range(20):
            _ = self.sd_helper.get_groundtruth(
                [ "rgb","depth","instanceSegmentation","semanticSegmentation",], self._viewport)
        # set up objects
        self._import_plush_animal(PLUSH_ANIMAL_PATH)
        self._setup_robots()
        # # start off Omniverse
        self.kit.play()
        # store original sim and vis points for reset
        self.sim_og_pts, self.vis_og_pts = self._get_plush_points()
        # # stop Omniverse
        # self.kit.pause()
        # reset the scene
        self.frame = 0
        self.reset()
def step(self):
self.kit.update(self.PHYSX_DT)
self.frame += 1
return self.frame
    def sample_action(self, grasp_point=None):
        """Sample a grasp point on the plush and a displacement target.

        When grasp_point is None, a random pixel of the (eroded) plush
        segmentation mask is drawn and back-projected through the partial
        point cloud; otherwise the given 3D point is used and grasp_pixel is
        None.

        Returns:
            (grasp_point, target_point, grasp_pixel) tuple, or None if no
            plush pixel is visible or no valid displacement could be sampled.
        """
        if grasp_point is None:
            gt = self.sd_helper.get_groundtruth(
                [ "rgb","depth","instanceSegmentation","semanticSegmentation",], self._viewport)
            pts = get_partial_point_cloud(self._viewport, project_factor=100.)
            semseg = gt['semanticSegmentation']
            # Erode the mask (2x2 kernel) before sampling a pixel from it.
            kernel = np.ones((2,2), np.uint8)
            semseg = cv2.erode(semseg, kernel, iterations=1)
            plush_pts = np.where(semseg == 1)
            if len(plush_pts[0]) == 0:
                return None
            idx = random.randint(0,len(plush_pts[0])-1)
            grasp_pixel = (plush_pts[0][idx], plush_pts[1][idx])
            grasp_point = tuple(pts[grasp_pixel[0], grasp_pixel[1],:])
        else:
            grasp_pixel = None
        target_point = self._sample_displacement_vector(grasp_point)
        if target_point is None:
            return None
        return grasp_point, target_point, grasp_pixel
    def reset(self):
        """Reset the scene: restore the plush geometry, drop it from above
        with a random orientation, re-randomize layout/lights, then
        (optionally) simulate until the configuration is static.
        """
        self.kit.stop()
        from pxr import Gf
        self.frame = 0
        print("Reseting plush geometry...")
        self._reset_plush_geometry(self.sim_og_pts, self.vis_og_pts)
        print("Finished reseting plush geometry...")
        # randonly drop the plush into the scene
        print("Reseting plush translation...")
        self.plush_translateOp.Set(Gf.Vec3f((0.,0.,250.)))
        print("Reseting plush rotation...")
        def randrot():
            return random.random() * 360.
        rotx,roty,rotz = randrot(), randrot(), randrot()
        self.plush_rotationOp.Set(rpy2quat(rotx,roty,rotz))
        print("Finished reseting plush pose...")
        print("Reseting scene...")
        self._randomize_scene()
        print("Finished reseting scene...")
        self.kit.play()
        # wait until stable
        if self.RESET_STATIC:
            print("Waiting to reach stable...")
            # minimum free-fall steps before checking for rest
            for _ in range(self.DROP_MIN):
                self.step()
            for ff in range(self.FALL_MAX*6):
                self.step()
                if self.check_scene_static():
                    print(f"Initial configuration becomes static after {ff} steps")
                    break
        print("Reset Finished")
        self.frame = 0
def reset_to(self, state):
self.kit.stop()
loc = state['loc']
rot = state['rot']
sim = state['sim']
vis = state['vis']
self._reset_plush_geometry(sim, vis)
self.plush_translateOp.Set(loc)
self.plush_rotationOp.Set(rot)
self.kit.play()
def check_scene_static(self):
_,_,_,v = self._get_object_velocity_stats()
return v < self.REST_THRESHOLD
    def get_scene_metadata(self):
        """Return JSON-serializable, reset-invariant scene metadata.

        Includes the plush asset path, its rest-state simulation topology
        (faces) and point sets, the workspace ranges, and camera info.
        """
        from pxr import PhysxSchema
        sbAPI = PhysxSchema.PhysxDeformableAPI(self.plush)
        faces = sbAPI.GetSimulationIndicesAttr().Get()
        return {'plush_path': self.PLUSH_ANIMAL_PATH,
                'sim_faces':np.array(faces, int).tolist(),
                'sim_pts':np.array(self.sim_og_pts, np.float16).tolist(),
                'vis_pts':np.array(self.vis_og_pts, np.float16).tolist(),
                'scene_range': self.scene_range.tolist(),
                'back_clutter_range': self.back_clutter_range.tolist(),
                'cam_info': self._get_camera_info()}
# background state is different per reset
def get_scene_background_state(self):
collider = {}
for p in find_immediate_children(self.front_group):
name = str(p.GetPath()).split("/")[-1]
e,f = find_collider(p)
collider[f"{name}_box"] = e
collider[f"{name}_tran"] = f
for p in find_immediate_children(self.back_group):
name = str(p.GetPath()).split("/")[-1]
e,f = find_collider(p)
collider[f"{name}_box"] = e
collider[f"{name}_tran"] = f
return collider
def get_scene_state_plush(self,raw=False,convert_to=None):
sim,vis = self._get_plush_points()
loc,rot,scale = self._get_plush_loc(),self._get_plush_rot(),self._get_plush_scale()
if not raw:
loc,rot,scale = tuple(loc),eval(str(rot)),tuple(scale)
state = {'sim':sim, 'vis':vis,
'loc':loc, 'rot':rot, 'scale':scale}
if convert_to is not None:
for k,v in state.items():
state[k] = np.array(v, convert_to)
return state
    def get_observations(self,
            sensors=["rgb","depth",
                # "instanceSegmentation",
                "semanticSegmentation",],
            partial_pointcloud=False):
        """Grab the current camera observations.

        Args:
            sensors (list): sensor names forwarded to the synthetic-data
                helper. NOTE(review): mutable default list — only read here,
                but a tuple would be safer.
            partial_pointcloud (bool): when True, also back-project depth
                into a partial point cloud under key 'pxyz'.

        Returns:
            dict: 'rgb_img' (alpha channel dropped), 'seg_img', 'dep_img',
            and optionally 'pxyz'.
        """
        frame = self.sd_helper.get_groundtruth(sensors, self._viewport)
        gt = {}
        gt['rgb_img'] = frame['rgb'][:,:,:-1]
        gt['seg_img'] = frame['semanticSegmentation']
        gt['dep_img'] = frame['depth'].squeeze()
        if partial_pointcloud:
            gt['pxyz'] = get_partial_point_cloud(self._viewport, project_factor=100.)
        return gt
################################################################
#
# Below are "private" functions ;)
#
################################################################
def _import_plush_animal(self, usda_path):
from omni.physx.scripts import physicsUtils
mesh_name = usda_path.split('/')[-1].split('.')[0]
from pxr import PhysxSchema,UsdGeom,UsdShade,Semantics
###################
# import object
abspath = carb.tokens.get_tokens_interface().resolve(usda_path)
physics_root = "/Root"
assert self.stage.DefinePrim(physics_root+f"/{mesh_name}").GetReferences().AddReference(abspath)
self.mesh_path = f"{physics_root}/{mesh_name}/{mesh_name}_obj/mesh"
self.plush= self.stage.GetPrimAtPath(self.mesh_path)
###################
# add deformable property
schema_parameters = {
"self_collision": True,
"vertex_velocity_damping": 0.005,
"sleep_damping": 10,
"sleep_threshold": 5,
"settling_threshold": 11,
"solver_position_iteration_count": 60,
"collisionRestOffset": 0.1,
"collisionContactOffset": 0.5,
"voxel_resolution": 45,
}
skin_mesh = UsdGeom.Mesh.Get(self.stage, self.mesh_path)
skin_mesh.AddTranslateOp().Set(Gf.Vec3f(0.0, 0.0, 300.0))
skin_mesh.AddOrientOp().Set(Gf.Quatf(0.707, 0.707, 0, 0))
skin_points = skin_mesh.GetPointsAttr().Get()
skin_indices = physicsUtils.triangulateMesh(skin_mesh)
# Create tet meshes for simulation and collision based on the skin mesh
simulation_resolution = schema_parameters["voxel_resolution"]
skin_mesh_scale = Gf.Vec3f(1.0, 1.0, 1.0)
collision_points, collision_indices = physicsUtils.create_conforming_tetrahedral_mesh(skin_points, skin_indices)
simulation_points, simulation_indices = physicsUtils.create_voxel_tetrahedral_mesh(collision_points, collision_indices, skin_mesh_scale, simulation_resolution)
# Apply PhysxDeformableBodyAPI and PhysxCollisionAPI to skin mesh and set parameter and tet meshes
deformable_body_api = PhysxSchema.PhysxDeformableBodyAPI.Apply(skin_mesh.GetPrim())
deformable_body_api.CreateSolverPositionIterationCountAttr().Set(schema_parameters['solver_position_iteration_count'])
deformable_body_api.CreateSelfCollisionAttr().Set(schema_parameters['self_collision'])
deformable_body_api.CreateCollisionIndicesAttr().Set(collision_indices)
deformable_body_api.CreateCollisionRestPointsAttr().Set(collision_points)
deformable_body_api.CreateSimulationIndicesAttr().Set(simulation_indices)
deformable_body_api.CreateSimulationRestPointsAttr().Set(simulation_points)
deformable_body_api.CreateVertexVelocityDampingAttr().Set(schema_parameters['vertex_velocity_damping'])
deformable_body_api.CreateSleepDampingAttr().Set(schema_parameters['sleep_damping'])
deformable_body_api.CreateSleepThresholdAttr().Set(schema_parameters['sleep_threshold'])
deformable_body_api.CreateSettlingThresholdAttr().Set(schema_parameters['settling_threshold'])
PhysxSchema.PhysxCollisionAPI.Apply(skin_mesh.GetPrim())
###################
# add deformable material
def add_physics_material_to_prim(stage, prim, materialPath):
bindingAPI = UsdShade.MaterialBindingAPI.Apply(prim)
materialPrim = UsdShade.Material(stage.GetPrimAtPath(materialPath))
bindingAPI.Bind(materialPrim, UsdShade.Tokens.weakerThanDescendants, "physics")
add_physics_material_to_prim(self.stage, self.plush, self.plush_animal_mat)
###################
# add collision group
physicsUtils.add_collision_to_collision_group(self.stage, self.mesh_path, self.real_object_group)
###################
# add semantic info
sem = Semantics.SemanticsAPI.Apply(self.stage.GetPrimAtPath(self.mesh_path), "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set("plush")
###################
# standarize transform
physicsUtils.setup_transform_as_scale_orient_translate(self.plush)
xform = UsdGeom.Xformable(self.plush)
ops = xform.GetOrderedXformOps()
self.plush_translateOp = ops[0]
self.plush_rotationOp = ops[1]
self.plush_scaleOp = ops[2]
scale_factor = self.PLUSH_SCALE
self.plush_scaleOp.Set((scale_factor,scale_factor,scale_factor))
    def _get_object_velocity_stats(self):
        """Return the 0/50/90/99th percentiles of per-vertex speed.

        Speeds are L2 norms of the soft body's simulation vertex velocities.
        """
        from pxr import PhysxSchema
        sbAPI = PhysxSchema.PhysxDeformableAPI(self.plush)
        velocity = np.array(sbAPI.GetSimulationVelocitiesAttr().Get())
        vnorm = np.linalg.norm(velocity, axis=1)
        return np.percentile(vnorm, [0,50,90,99])
    def _setup_robots(self):
        """Wrap the magic-gripper prims in a magic_eef end-effector helper."""
        actor = self.stage.GetPrimAtPath(self.magic_gripper)
        fingerL = self.stage.GetPrimAtPath(self.fingerL)
        fingerR = self.stage.GetPrimAtPath(self.fingerR)
        self.gripper = magic_eef(actor,
                                 self.stage,
                                 eef_default_loc=(0.,0.,600.),
                                 default_speed=self.ROBOT_SPEED,
                                 fingerL=fingerL,
                                 fingerR=fingerR)
    def _setup_cameras(self):
        """Point the default viewport at the scene camera at RESOLUTION²."""
        from pxr import UsdGeom
        stage = omni.usd.get_context().get_stage()
        # Need to set this before setting viewport window size
        carb.settings.acquire_settings_interface().set_int("/app/renderer/resolution/width", -1)
        carb.settings.acquire_settings_interface().set_int("/app/renderer/resolution/height", -1)
        viewport_window = omni.kit.viewport.get_default_viewport_window()
        viewport_window.set_active_camera("/Root/cam_light/Camera")
        viewport_window.set_texture_resolution(RESOLUTION,RESOLUTION)
        viewport_window.set_window_size(RESOLUTION, RESOLUTION)
    def _get_plush_loc(self):
        """Current value of the plush translate xform op."""
        return self.plush_translateOp.Get()
    def _get_plush_rot(self):
        """Current value of the plush orient xform op."""
        return self.plush_rotationOp.Get()
    def _get_plush_scale(self):
        """Current value of the plush scale xform op."""
        return self.plush_scaleOp.Get()
def _get_plush_points(self):
from pxr import PhysxSchema
sbAPI = PhysxSchema.PhysxDeformableBodyAPI(self.plush)
sim = sbAPI.GetSimulationPointsAttr().Get()
mesh = UsdGeom.Mesh(self.plush)
vis = mesh.GetPointsAttr().Get()
return sim, vis
def _get_camera_info(self):
cam_info = {}
camera_pose, camera_intr = get_camera_params(self._viewport)
cam_name = get_camera_name(self._viewport)
cam_info[cam_name] = [camera_pose.tolist(), camera_intr.tolist()]
return cam_info
    def _randomize_collection(self, collection_prim, scene_range, drop_range=None, rand_rot=True, padding=True):
        """Randomly re-place the children of a group without overlaps.

        Rasterizes each child's collider footprint onto a 2D canvas and
        rejection-samples translations (3 attempts per object); objects that
        cannot be placed are moved far off-scene (y = -2000).

        Args:
            collection_prim: group prim whose immediate children are moved.
            scene_range: 2D (XY) extent to place objects within.
            drop_range: optional region to keep clear (pre-filled on canvas).
            rand_rot (bool): also randomize each object's Z rotation.
            padding (bool): pad/scale footprints before the overlap test.
        """
        extents,objs = [],[]
        for p in find_immediate_children(collection_prim):
            objs.append(str(p.GetPath()))
            extent, transform = find_collider(p)
            extents.append(transform_verts(extent, transform))
        # Drop Z: placement works on ground-plane bounding boxes.
        objects = [standardize_bbox(bbox) for bbox in np.array(extents)[:,:,:-1]]
        canvas = get_canvas(scene_range)
        if drop_range is not None:
            fill_canvas(canvas, scene_range, drop_range)
        translations = []
        for b,n in zip(objects,objs):
            # Up to 3 rejection-sampling attempts per object.
            for _ in range(3):
                t = sample_bbox_translation(b, scene_range)
                if padding:
                    tb = scale(pad_to_square(b + t))
                else:
                    tb = b + t
                if not overlaps_with_current(canvas, scene_range, tb):
                    fill_canvas(canvas, scene_range, tb)
                    translations.append((n,t))
                    break
            # Placement failed: park the object far outside the scene.
            if len(translations) == 0 or translations[-1][0] != n:
                translations.append((n,np.array([0,-2000])))
        def randrot():
            return random.random() * 360.
        from pxr import UsdGeom
        from omni.physx.scripts import physicsUtils
        # Apply sampled translations (and optional Z rotations) to the prims.
        for n,t in translations:
            xform = UsdGeom.Xformable(self.stage.GetPrimAtPath(n))
            physicsUtils.setup_transform_as_scale_orient_translate(xform)
            ops = xform.GetOrderedXformOps()
            translateOp = ops[0]
            translateOp.Set(tuple(np.array(tuple(translateOp.Get())) + np.append(t, 0)))
            if rand_rot:
                orientOp = ops[1]
                orientOp.Set(rpy2quat(0,0,randrot()))
    def _randomize_lighting(self):
        """Randomize color temperature and intensity of the scene lights.

        One shared temperature (2500-7500) is applied to all three lights;
        intensities are sampled independently (2500-12500).
        """
        domelight = self.stage.GetPrimAtPath("/Root/cam_light/Lights/DomeLight")
        light = self.stage.GetPrimAtPath("/Root/cam_light/Lights/DistantLight")
        light1 = self.stage.GetPrimAtPath("/Root/cam_light/Lights/DistantLight_01")
        temp = np.random.rand(1)[0] * 5000 + 2500
        domelight.GetAttribute('colorTemperature').Set(temp)
        light.GetAttribute('colorTemperature').Set(temp)
        light1.GetAttribute('colorTemperature').Set(temp)
        int_range = 10000
        int_min = 2500
        for l in [domelight, light, light1]:
            intensity = np.random.rand(1)[0] * int_range + int_min
            l.GetAttribute('intensity').Set(intensity)
def _randomize_scene(self):
    """Apply whichever domain randomizations are enabled (layout and/or lights)."""
    if self.RAND_LAYOUT:
        # randomize front scene (with rotation + padded overlap checks)
        self._randomize_collection(self.front_group, self.scene_range[:, :-1], self.drop_range)
        # randomize back clutter (no rotation, tight AABBs)
        self._randomize_collection(self.back_group, self.back_clutter_range, rand_rot=False, padding=False)
    if self.RAND_LIGHTS:
        # randomize lights
        self._randomize_lighting()
def _get_2d_layout_occupancy_map(self):
    """Rasterize every front/back object's collider AABB into a 2-D occupancy
    grid covering `self.total_range`; returns the canvas (nonzero = occupied)."""
    extents = []
    for p in find_immediate_children(self.front_group):
        extent, transform = find_collider(p)
        extents.append(transform_verts(extent, transform))
    for p in find_immediate_children(self.back_group):
        extent, transform = find_collider(p)
        extents.append(transform_verts(extent, transform))
    # drop z; reduce each object's corner set to a [[min],[max]] 2-D box
    objects = [standardize_bbox(bbox) for bbox in np.array(extents)[:, :, :-1]]
    #canvas = get_canvas(self.scene_range[:,:-1])
    canvas = get_canvas(self.total_range[:, :-1])
    for b in objects:
        fill_canvas(canvas, self.total_range[:, :-1], b)
    return canvas
def _sample_displacement_vector(self, grasp_point):
    """Sample a collision-free displacement target for `grasp_point`(s).

    Repeatedly draws a move length (clipped normal) and 100 random z-up
    directions, keeps candidates that land inside `self.total_range` and whose
    path reads as free in the 2-D occupancy map, and returns one viable target
    point. Returns None after 10 unsuccessful rounds.
    """
    sampled_for = 0
    # move-length distribution parameters (scene units)
    mean_len = 160
    std_len = 80
    max_len = 240
    min_len = 80
    canvas = self._get_2d_layout_occupancy_map()
    while(True):
        sampled_for = sampled_for + 1
        move_len = np.clip(np.random.normal(loc=mean_len, scale=std_len), min_len, max_len)
        move_dir = sample_direction_zup(100).squeeze()
        #move_dir[1,:] = np.abs(move_dir[1,:])
        move_vec = move_dir * move_len
        target_pts = grasp_point + move_vec.T
        in_world = np.logical_and(
            target_pts > self.total_range[0],
            target_pts < self.total_range[1]).all(axis=1)
        occupancies = []
        try:
            # assure that no obstacle is in path for length times 1.3
            for i in range(int(max_len * 1.3)):
                temp = grasp_point + (target_pts - grasp_point) / max_len * i
                # NOTE(review): these clips overwrite temp with the *clipped
                # target* coordinates, not the interpolated point — so every
                # step of the "path" check actually tests the endpoint only.
                # Looks like a bug; confirm intent before changing.
                temp[:, 0] = np.clip(target_pts[:, 0], self.total_range[0, 0], self.total_range[1, 0])
                temp[:, 1] = np.clip(target_pts[:, 1], self.total_range[0, 1], self.total_range[1, 1])
                occupancies.append(get_occupancy_value(
                    canvas, self.total_range[:, :-1], temp[:, :-1]))
            path_no_collision = (np.array(occupancies) == 0).all(axis=0)
            viable = np.logical_and(in_world, path_no_collision)
            in_idx = np.nonzero(viable)[0]
        except:
            # NOTE(review): bare except — any indexing error silently retries
            continue
        if len(in_idx) > 0:
            target_point = target_pts[np.random.choice(in_idx)]
            return target_point
        else:
            if sampled_for > 10:
                break
    return None
def _reset_plush_geometry(self, sim, vis):
    """Reset the plush soft body to a saved state.

    Args:
        sim: simulation (tet) points to restore.
        vis: visual mesh points to restore.

    Also zeroes all simulation velocities so the body starts at rest.
    """
    from pxr import PhysxSchema, Gf, Vt
    # reset simulation points
    sbAPI = PhysxSchema.PhysxDeformableBodyAPI(self.plush)
    sbAPI.GetSimulationPointsAttr().Set(sim)
    # reset simulation points velocity
    sbAPI = PhysxSchema.PhysxDeformableAPI(self.plush)
    velocity = np.array(sbAPI.GetSimulationVelocitiesAttr().Get())
    zero_velocity = np.zeros_like(velocity)
    velocity_vec = Vt.Vec3fArray([Gf.Vec3f(tuple(m)) for m in zero_velocity])
    sbAPI.GetSimulationVelocitiesAttr().Set(velocity_vec)
    # reset visual points
    mesh = UsdGeom.Mesh(self.plush)
    mesh.GetPointsAttr().Set(vis)
NVlabs/ACID/PlushSim/scripts/utils.py | import os
import math
import omni
import numpy as np
from PIL import Image
from pxr import UsdGeom, Usd, UsdPhysics, Gf
import matplotlib.pyplot as plt
################################################################
# State Saving Utils
# (Geometry)
################################################################
def transform_points_cam_to_world(cam_pts, camera_pose):
    """Map Nx3 camera-frame points into the world frame.

    Applies the rotation and translation of the 4x4 pose matrix
    `camera_pose` to every row of `cam_pts`; returns an Nx3 array.
    """
    rotation = camera_pose[0:3, 0:3]
    translation = camera_pose[0:3, 3]
    # (R @ p^T + t)^T == p @ R^T + t
    return cam_pts @ rotation.T + translation
def project_depth_world_space(depth_image, camera_intr, camera_pose, project_factor=1.):
    """Back-project a depth image into a (H, W, 3) world-space point map.

    NOTE(review): the locals are named `W, H` but `depth_image.shape` is
    (rows, cols); the reshape matches the row-major flattening used in
    project_depth_cam_space, so the output is actually (rows, cols, 3).
    """
    cam_pts = project_depth_cam_space(depth_image, camera_intr, keep_dim=False, project_factor=project_factor)
    world_pts = transform_points_cam_to_world(cam_pts, camera_pose)
    W, H = depth_image.shape
    pts = world_pts.reshape([W, H, 3])
    return pts
def project_depth_cam_space(depth_img, camera_intrinsics, keep_dim=True, project_factor=1.):
    """Back-project a 2-D depth image into camera-space 3-D points.

    Args:
        depth_img: (H, W) depth values.
        camera_intrinsics: 3x3 matrix; only fx=[0,0] and fy=[1,1] are used,
            with the principal point taken as the image center.
        keep_dim: return (H, W, 3) when True, else (H*W, 3).
        project_factor: uniform scale applied to the resulting points.
    """
    im_h = depth_img.shape[0]
    im_w = depth_img.shape[1]
    # pixel-coordinate grids: rows (ys) and columns (xs)
    ys, xs = np.meshgrid(
        np.linspace(0, im_h - 1, im_h),
        np.linspace(0, im_w - 1, im_w),
        indexing="ij")
    fx = camera_intrinsics[0, 0]
    fy = camera_intrinsics[1, 1]
    # x is mirrored (negative depth factor) to match the renderer's convention
    x = (xs - im_w / 2.) * (-depth_img / fx)
    y = (ys - im_h / 2.) * (depth_img / fy)
    pts = np.stack([x, y, depth_img.copy()], axis=-1).reshape(-1, 3) * project_factor
    if keep_dim:
        pts = pts.reshape(im_h, im_w, 3)
    return pts
def get_camera_params(viewport):
    """Return (camera pose 4x4, intrinsics 3x3) for the viewport's active camera.

    Focal length in pixels is derived from the USD focal length and horizontal
    aperture. NOTE(review): the principal-point entries use image_h in row 0
    and image_w in row 1 — looks swapped, but downstream projection only reads
    [0,0] and [1,1], so it has no effect there; verify before reusing.
    """
    stage = omni.usd.get_context().get_stage()
    prim = stage.GetPrimAtPath(viewport.get_active_camera())
    # USD matrices are row-vector convention; transpose to column-vector pose
    prim_tf = np.array(UsdGeom.Camera(prim).GetLocalTransformation())
    focal_length = prim.GetAttribute("focalLength").Get()
    horiz_aperture = prim.GetAttribute("horizontalAperture").Get()
    # horizontal field of view from the pinhole model
    fov = 2 * math.atan(horiz_aperture / (2 * focal_length))
    image_w, image_h = viewport.get_texture_resolution()
    camera_focal_length = (float(image_w) / 2) / np.tan(fov / 2)
    cam_intr = np.array(
        [[camera_focal_length, 0, float(image_h) / 2],
         [0, camera_focal_length, float(image_w) / 2],
         [0, 0, 1]])
    return prim_tf.T, cam_intr
def get_partial_point_cloud(viewport, in_world_space=True, project_factor=1.):
    """Back-project the viewport's linear-depth sensor into a point map.

    Returns an (H, W, 3) array in world space (default) or camera space.
    """
    from omni.syntheticdata import sensors
    data = sensors.get_depth_linear(viewport)
    h, w = data.shape[:2]
    # sign flip matches the projection convention in project_depth_cam_space
    depth_data = -np.frombuffer(data, np.float32).reshape(h, w, -1)
    camera_pose, camera_intr = get_camera_params(viewport)
    if in_world_space:
        return project_depth_world_space(depth_data.squeeze(), camera_intr, camera_pose, project_factor=project_factor)
    else:
        return project_depth_cam_space(depth_data.squeeze(), camera_intr, project_factor=project_factor)
def export_visual_mesh(prim, export_path, loc=None, rot=None, binarize=True):
    """Export `prim`'s visual mesh: float16 .npy (binarize) or a textured .obj.

    NOTE(review): `loc` and `rot` are accepted but never used — points are
    written in the mesh's local frame. Confirm whether a transform was intended.
    """
    assert prim.IsA(UsdGeom.Mesh), "prim needs to be a UsdGeom.Mesh"
    mesh = UsdGeom.Mesh(prim)
    points = mesh.GetPointsAttr().Get()
    if binarize:
        path = os.path.splitext(export_path)[0] + '.npy'
        np.save(path, np.array(points, np.float16))
    else:
        print(export_path)
        # .obj uses 1-based vertex indices
        faces = np.array(mesh.GetFaceVertexIndicesAttr().Get()).reshape(-1, 3) + 1
        uv = mesh.GetPrimvar("st").Get()
        with open(export_path, "w") as fp:
            fp.write("mtllib teddy.mtl\nusemtl Material.004\n")
            for x, y, z in points:
                fp.write(f"v {x:.3f} {y:.3f} {z:.3f}\n")
            for u, v in uv:
                # NOTE(review): "{u:=.4f}" uses '=' alignment — possibly a typo
                # for plain "{u:.4f}"; output is identical for positive u.
                fp.write(f"vt {u:=.4f} {v:.4f}\n")
            for i, (x, y, z) in enumerate(faces):
                # each face references its own 3 consecutive uv entries
                fp.write(f"f {x}/{i*3+1} {y}/{i*3+2} {z}/{i*3+3}\n")
def get_sim_points(prim, loc=None, rot=None):
    """Return the soft body's simulation points, optionally rotated then translated.

    `rot` is a quaternion-like object whose str() is "(w, x, y, z)".
    NOTE(review): `eval(str(rot))` executes text derived from the USD object —
    fine for trusted stages, but consider parsing components explicitly.
    """
    from pxr import PhysxSchema
    sbAPI = PhysxSchema.PhysxDeformableBodyAPI(prim)
    points = sbAPI.GetSimulationPointsAttr().Get()
    if rot is not None:
        points = np.array(points)
        w, x, y, z = eval(str(rot))
        from scipy.spatial.transform import Rotation
        # scipy expects (x, y, z, w) ordering
        rot = Rotation.from_quat(np.array([x, y, z, w]))
        points = rot.apply(points)
    if loc is not None:
        loc = np.array(tuple(loc))
        points = points + loc
    return points
def get_sim_faces(prim):
    """Return the soft body's flat simulation index array (callers group it in 4s)."""
    from pxr import PhysxSchema
    sbAPI = PhysxSchema.PhysxDeformableAPI(prim)
    faces = sbAPI.GetSimulationIndicesAttr().Get()
    return faces
def export_simulation_voxels(prim, export_path, binarize=True, export_faces=False):
    """Export the soft body's simulation points (and optionally index groups).

    binarize=True writes a compact numpy file; otherwise an .obj-style
    text file is written to `export_path`.
    """
    points = get_sim_points(prim)
    if export_faces:
        faces = get_sim_faces(prim)
    if binarize:
        path = os.path.splitext(export_path)[0] + '.npy'
        if export_faces:
            # NOTE(review): np.savez appends ".npz", so this actually writes
            # "<name>.npy.npz" — confirm whether that is intended.
            np.savez(path, points=np.array(points, np.float16), faces=np.array(faces, int))
        else:
            np.save(path, np.array(points, np.float16))
    else:
        with open(export_path, 'w') as fp:
            for p in points:
                fp.write(f"v {p[0]:.3f} {p[1]:.3f} {p[2]:.3f}\n")
            if export_faces:
                # flat index array -> 1-based quads for .obj
                faces = np.array(faces, int).reshape([-1, 4]) + 1
                for f in faces:
                    fp.write(f"f {f[0]} {f[1]} {f[2]} {f[3]}\n")
def visualize_sensors(gt, save_path):
    """Save a 1x3 matplotlib figure (RGB / depth / semantic seg) to `save_path`."""
    from omni.syntheticdata import visualize
    # GROUNDTRUTH VISUALIZATION
    # Setup a figure
    fig, axes = plt.subplots(1, 3, figsize=(20, 6))
    axes = axes.flat
    for ax in axes:
        ax.axis("off")
    # RGB
    axes[0].set_title("RGB")
    # NOTE(review): RGB is drawn on axes 0 and 1; axes[1] is then overdrawn by
    # depth below, and the alpha=0.7 overlay on axes[2] has no RGB backdrop —
    # possibly the loop was meant to cover all three axes. Confirm intent.
    for ax in axes[:-1]:
        ax.imshow(gt["rgb"])
    # DEPTH
    axes[1].set_title("Depth")
    depth_data = np.clip(gt["depth"], 0, 255)
    axes[1].imshow(visualize.colorize_depth(depth_data.squeeze()))
    # SEMSEG
    axes[2].set_title("Semantic Segmentation")
    semantic_seg = gt["semanticSegmentation"]
    semantic_rgb = visualize.colorize_segmentation(semantic_seg)
    axes[2].imshow(semantic_rgb, alpha=0.7)
    # Save figure
    fig.savefig(save_path)
    plt.close(fig)
def save_frame(frame_name, frame_data, save_dir,
               save_rgb=True, save_seg=True, save_depth=True, save_partial_pointcloud=False):
    """Write one frame's rgb/segmentation/depth images into `save_dir`.

    NOTE(review): `save_partial_pointcloud` is accepted but unused here.
    """
    if save_rgb:
        rgb = frame_data['rgb_img']
        Image.fromarray(rgb).save(f"{save_dir}/rgb_{frame_name}.jpg")
    if save_seg:
        seg = frame_data['seg_img']
        # binary mask -> 0/255 3-channel image
        sem = np.tile(seg[:, :, np.newaxis], (1, 1, 3)).astype(np.uint8) * 255
        Image.fromarray(sem).save(f"{save_dir}/seg_{frame_name}.jpg")
    if save_depth:
        # depth scaled by 1000 and stored as a 16-bit PNG
        depth_img = Image.fromarray((frame_data['dep_img'].squeeze() * 1000).astype(np.uint16), mode='I;16').convert(mode='I')
        depth_img.save(f"{save_dir}/depth_{frame_name}.png")
def save_state(state_name, state_data, save_dir):
    """Save a (loc, rot, sim, vis) state tuple as an .npz archive."""
    loc, rot, sim, vis = state_data
    state_dict = {}
    state_dict['loc'] = np.array(tuple(loc))
    # NOTE(review): eval(str(rot)) relies on the quaternion repr "(w, x, y, z)"
    state_dict['rot'] = np.array(eval(str(rot)))
    state_dict['sim'] = np.array(sim)
    state_dict['vis'] = np.array(vis)
    np.savez(f"{save_dir}/state_{state_name}.npz", **state_dict)
################################################################
# Interaction Utils
################################################################
def sample_pick_point(partial_point_cloud, segmentation):
    """Validate that the point-cloud image and segmentation mask share H x W.

    NOTE(review): this function only performs the dimension check and then
    returns None — it looks truncated (presumably a pick point should be
    sampled from the masked point cloud). Confirm against the original source.
    """
    im_h = segmentation.shape[0]
    im_w = segmentation.shape[1]
    # point cloud "image" height and width
    pc_h = partial_point_cloud.shape[0]
    pc_w = partial_point_cloud.shape[1]
    assert im_h == pc_h and im_w == pc_w, "partial_point_cloud dimension should match with that of segmentation mask"
def sample_spherical(npoints, ndim=3):
    """Draw `npoints` uniform unit vectors on the (ndim-1)-sphere, as (ndim, npoints)."""
    samples = np.random.randn(ndim, npoints)
    norms = np.linalg.norm(samples, axis=0)
    return samples / norms
def sample_direction(npoints):
    """Sample `npoints` unit directions (3, npoints), biased toward ~45 deg
    elevation, with y as the up axis (rows are x, y, z)."""
    # NOTE(review): randn (not rand) for the azimuth — the angle is therefore
    # not uniform; kept as-is to preserve the original distribution.
    azimuth = np.random.randn(npoints) * 2 * np.pi
    elevation = np.clip(
        np.random.normal(loc=np.pi / 4., scale=np.pi / 12., size=npoints),
        np.pi / 6., np.pi / 2.)
    sin_el = np.sin(elevation)
    return np.vstack([
        np.cos(azimuth) * sin_el,
        np.cos(elevation),
        np.sin(azimuth) * sin_el,
    ])
def sample_direction_zup(npoints):
    """Sample `npoints` unit directions (3, npoints), biased toward ~45 deg
    elevation, with z as the up axis (rows are x, y, z)."""
    # NOTE(review): randn (not rand) for the azimuth — kept to preserve the
    # original (non-uniform) angular distribution.
    azimuth = np.random.randn(npoints) * 2 * np.pi
    elevation = np.clip(
        np.random.normal(loc=np.pi / 4., scale=np.pi / 12., size=npoints),
        np.pi / 6., np.pi / 2.)
    sin_el = np.sin(elevation)
    return np.vstack([
        np.cos(azimuth) * sin_el,
        np.sin(azimuth) * sin_el,
        np.cos(elevation),
    ])
def interpolate(start_loc, end_loc, speed):
    """Return evenly spaced waypoints from `start_loc` to `end_loc`.

    Produces an (n+1, d) array whose first row is `start_loc` and last row is
    exactly `end_loc`, with n = floor(distance / speed) steps of length ~speed.

    Fix: when the distance was shorter than one step (or zero), the original
    computed `chunks == 0` and divided by it, yielding inf/nan waypoints.
    We now clamp to a single step, so such calls return just [start, end].
    """
    start_loc = np.array(start_loc, dtype=float)
    end_loc = np.array(end_loc, dtype=float)
    dist = np.linalg.norm(end_loc - start_loc)
    # at least one chunk so the division below is always well-defined
    chunks = max(int(dist // speed), 1)
    steps = np.arange(chunks + 1, dtype=float)
    return start_loc + np.outer(steps, (end_loc - start_loc) / chunks)
class magic_eef(object):
    """Kinematically-driven "magic" gripper.

    The end effector is moved by writing its translate/orient xform ops
    directly; grasping is implemented as a PhysX soft-body/rigid attachment
    rather than a physical grip, hence "magic".
    """

    def __init__(self, end_effector, stage, eef_default_loc=None, default_speed=1,
                 fingerL=None, fingerR=None):
        """
        Args:
            end_effector: rigid-body prim that is teleported around.
            stage: USD stage containing all prims.
            eef_default_loc: home translation for the gripper.
            default_speed: waypoint spacing used by plan_trajectory.
            fingerL, fingerR: optional finger prims, animated on (un)grasp.
        """
        self.end_effector = end_effector
        self.eef_default_loc = eef_default_loc
        self.default_speed = default_speed
        self.stage = stage
        xform = UsdGeom.Xformable(end_effector)
        self.ops = xform.GetOrderedXformOps()
        assert self.ops[0].GetOpType() == UsdGeom.XformOp.TypeTranslate,\
            "Code is based on UsdGeom.Xformable with first op as translation"
        assert self.ops[1].GetOpType() == UsdGeom.XformOp.TypeOrient,\
            "Code is based on UsdGeom.Xformable with second op as orientation"
        self.attachmentPath = None
        self.set_translation(eef_default_loc)
        self.fingerL = fingerL
        if fingerL is not None:
            xform = UsdGeom.Xformable(fingerL)
            self.fingerL_ops = xform.GetOrderedXformOps()[0]
            self.fingerL_ops.Set((-5, 0, 20))
        self.fingerR = fingerR
        if fingerR is not None:
            xform = UsdGeom.Xformable(fingerR)
            self.fingerR_ops = xform.GetOrderedXformOps()[0]
            # BUG FIX: this branch originally wrote to fingerL_ops (copy/paste
            # error), so the right finger was never initialized.
            self.fingerR_ops.Set((5, 0, 20))

    def get_translation(self):
        """Current gripper translation."""
        return self.ops[0].Get()

    def set_translation(self, loc):
        """Teleport the gripper to `loc`."""
        self.ops[0].Set(loc)

    def reset_translation(self):
        """Return the gripper to its home location."""
        self.set_translation(self.eef_default_loc)

    def get_orientation(self):
        """Current gripper orientation quaternion."""
        return self.ops[1].Get()

    def set_orientation(self, rot):
        """Set the gripper orientation quaternion."""
        self.ops[1].Set(rot)

    def grasp(self, target_object):
        """Attach the soft body `target_object` to the gripper."""
        # enable collision
        self.end_effector.GetAttribute("physics:collisionEnabled").Set(True)
        # create magic grasp
        self.attachmentPath = target_object.GetPath().AppendChild("rigidAttachment_0")
        omni.kit.commands.execute(
            "AddSoftBodyRigidAttachmentCommand",
            target_attachment_path=self.attachmentPath,
            softbody_path=target_object.GetPath(),
            rigidbody_path=self.end_effector.GetPath(),
        )
        attachmentPrim = self.stage.GetPrimAtPath(self.attachmentPath)
        assert attachmentPrim
        # BUG FIX: these Set() calls were wrapped directly in `assert`, so the
        # attachment flags were silently skipped when running under `python -O`.
        for attr_name in ("physxEnableHaloParticleFiltering",
                          "physxEnableVolumeParticleAttachments",
                          "physxEnableSurfaceTetraAttachments"):
            ok = attachmentPrim.GetAttribute(attr_name).Set(True)
            assert ok, f"failed to set {attr_name}"
        omni.physx.get_physx_interface().release_physics_objects()
        # close fingers
        self.fingerL_ops.Set((-5, 0, 20))
        self.fingerR_ops.Set((5, 0, 20))

    def ungrasp(self):
        """Delete the attachment created by grasp() and open the fingers."""
        assert self.attachmentPath is not None, "nothing is grasped! (there is no attachment registered)"
        # release magic grasp
        omni.kit.commands.execute(
            "DeletePrimsCommand",
            paths=[self.attachmentPath]
        )
        self.end_effector.GetAttribute("physics:collisionEnabled").Set(False)
        omni.physx.get_physx_interface().release_physics_objects()
        self.attachmentPath = None
        # open fingers
        self.fingerL_ops.Set((-80, 0, 20))
        self.fingerR_ops.Set((80, 0, 20))
        #self.reset_translation()

    def plan_trajectory(self, start_loc, end_loc, speed=None):
        """Return waypoints from start to end spaced by `speed` (or the default)."""
        return interpolate(start_loc, end_loc, self.default_speed if speed is None else speed)
################################
# Random utils
################################
def get_camera_name(viewport):
    """Return the prim name of the viewport's active camera."""
    stage = omni.usd.get_context().get_stage()
    return stage.GetPrimAtPath(viewport.get_active_camera()).GetName()
def rpy2quat(roll, pitch, yaw):
    """Convert roll/pitch/yaw angles (radians) to a Gf.Quatf (w, x, y, z)."""
    half_r = roll * 0.5
    half_p = pitch * 0.5
    half_y = yaw * 0.5
    cr, sr = math.cos(half_r), math.sin(half_r)
    cp, sp = math.cos(half_p), math.sin(half_p)
    cy, sy = math.cos(half_y), math.sin(half_y)
    cpcy = cp * cy
    spsy = sp * sy
    spcy = sp * cy
    cpsy = cp * sy
    return Gf.Quatf(
        cr * cpcy + sr * spsy,   # w
        sr * cpcy - cr * spsy,   # x
        cr * spcy + sr * cpsy,   # y
        cr * cpsy - sr * spcy,   # z
    )
################################
# Scene randomization utils
################################
def is_collider(prim):
    """Return truthy iff `prim` has its physics collision attribute enabled.

    Any failure (invalid prim, missing attribute) counts as "not a collider".

    Fix: the original used a bare `except:`, which also swallowed
    SystemExit/KeyboardInterrupt; narrowed to `except Exception`.
    """
    try:
        return prim.GetAttribute("physics:collisionEnabled").Get()
    except Exception:
        return False
def find_collider(prim):
    """Depth-first search `prim`'s subtree for the first collision-enabled prim.

    Returns (extent, world_transform) wrapped in np.array. Colliders without an
    authored `extent` are treated as 100-unit cubes. If no collider exists,
    both values are np.array(None) (0-d object arrays).
    """
    #from pxr import UsdPhysics
    primRange = iter(Usd.PrimRange(prim))
    extent, transform = None, None
    for p in primRange:
        #if p.HasAPI(UsdPhysics.CollisionAPI):
        if is_collider(p):
            extent = p.GetAttribute("extent").Get()
            if extent is None:
                # this means that the object is a cube
                extent = np.array([[-50, -50, -50], [50, 50, 50]])
            transform = omni.usd.get_world_transform_matrix(p, Usd.TimeCode.Default())
            # stop descending once the first collider is found
            primRange.PruneChildren()
            break
    return np.array(extent), np.array(transform)
def find_immediate_children(prim):
    """Return the direct (depth-1) children of `prim`, in traversal order."""
    parent_path = prim.GetPath()
    return [child for child in Usd.PrimRange(prim)
            if child.GetPath().GetParentPath() == parent_path]
def extent_to_cube(extent):
    """Expand an AABB ([min_xyz, max_xyz]) into its 8 corner vertices and the
    6 quad faces of the box (faces use 1-based vertex indices)."""
    (min_x, min_y, min_z), (max_x, max_y, max_z) = extent
    # corner order: x major (max first), then y, then z — faces below rely on it
    corners = [(x, y, z)
               for x in (max_x, min_x)
               for y in (max_y, min_y)
               for z in (max_z, min_z)]
    verts = np.array(corners)
    faces = np.array([
        (1, 5, 7, 3),
        (4, 3, 7, 8),
        (8, 7, 5, 6),
        (6, 2, 4, 8),
        (2, 1, 3, 4),
        (6, 5, 1, 2)])
    return verts, faces
def transform_verts(verts, transform):
    """Apply a 4x4 row-vector-convention transform to Nx3 `verts`; returns Nx3."""
    ones = np.ones((verts.shape[0], 1))
    homogeneous = np.concatenate([verts, ones], axis=-1)
    return (homogeneous @ transform)[:, :-1]
def export_quad_obj(verts, faces, export_path):
    """Write a quad mesh to a Wavefront .obj file (faces are 1-based quads)."""
    lines = [f"v {p[0]:.3f} {p[1]:.3f} {p[2]:.3f}\n" for p in verts]
    lines += [f"f {f[0]} {f[1]} {f[2]} {f[3]}\n" for f in faces]
    with open(export_path, 'w') as obj_file:
        obj_file.writelines(lines)
def standardize_bbox(bbox):
    """Collapse a set of corner points to the canonical [[min...], [max...]] form."""
    return np.stack([bbox.min(axis=0), bbox.max(axis=0)])
def get_bbox_translation_range(bbox, scene_range):
    """Return the [[min], [max]] translations keeping `bbox` inside `scene_range` (2-D)."""
    # bbox size
    width, height = bbox[1] - bbox[0]
    # shrink the scene by half the bbox size on each side to bound the center
    half_margin = np.array([[width, height], [-width, -height]]) / 2
    center_bounds = scene_range + half_margin
    return center_bounds - np.mean(bbox, axis=0)
def sample_bbox_translation(bbox, scene_range):
    """Uniformly sample a 2-D translation that keeps `bbox` inside `scene_range`."""
    lo, hi = get_bbox_translation_range(bbox, scene_range)
    return lo + np.random.rand(2) * (hi - lo)
def get_canvas(scene_range):
    """Allocate a zeroed 2-D occupancy grid 10% larger than `scene_range`."""
    grid_shape = ((scene_range[1] - scene_range[0]) * 1.1).astype(int)
    return np.zeros(grid_shape)
def fill_canvas(canvas, scene_range, bbox, val=1):
    """Rasterize world-space `bbox` into `canvas` with `val`.

    Boxes that start before the grid or end past it are silently ignored.
    """
    # world -> grid offset: grid center corresponds to the scene center
    offset = np.array(canvas.shape) / 2 - np.mean(scene_range, axis=0)
    (r0, c0), (r1, c1) = (bbox + offset).astype(int)
    rows, cols = canvas.shape
    if r0 < 0 or c0 < 0:
        return
    if r1 >= rows or c1 >= cols:
        return
    canvas[r0:r1, c0:c1] = val
def get_occupancy_value(canvas, scene_range, pts):
    """Look up canvas values at the world-coordinate points `pts` (N, 2)."""
    offset = np.array(canvas.shape) / 2 - np.mean(scene_range, axis=0)
    idx = (pts + offset).astype(int)
    return canvas[idx[:, 0], idx[:, 1]]
def overlaps_with_current(canvas, scene_range, bbox, val=0):
    """True if any canvas cell under world-space `bbox` differs from `val`
    (i.e. the region is already occupied)."""
    offset = np.array(canvas.shape) / 2 - np.mean(scene_range, axis=0)
    (r0, c0), (r1, c1) = (bbox + offset).astype(int)
    return (canvas[r0:r1, c0:c1] != val).any()
def pad_to_square(bbox):
    """Grow `bbox` into a square sharing its center, side = the longer side."""
    half_x, half_y = (bbox[1] - bbox[0]) / 2.
    center = np.mean(bbox, axis=0)
    half = max(half_x, half_y)
    return np.stack([center - half, center + half])
def scale(bbox, factor=1.1):
    """Scale `bbox` about its center by `factor` (default: grow 10%)."""
    half = (bbox[1] - bbox[0]) / 2. * factor
    center = np.mean(bbox, axis=0)
    return np.stack([center - half, center + half])
| 16,913 | Python | 36.923767 | 126 | 0.601845 |
NVlabs/ACID/PlushSim/scripts/writer.py | #!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Helper class for writing groundtruth data offline.
"""
import atexit
import colorsys
import queue
import omni
import os
import threading
import numpy as np
from PIL import Image, ImageDraw
class DataWriter:
    """Writes synthetic groundtruth (RGB, depth, segmentation, 2-D bboxes,
    camera parameters, poses) to per-viewport folders.

    Frames are pushed onto a bounded queue and drained by daemon worker
    threads; `stop_threads` (also registered with atexit) flushes the queue.
    """

    def __init__(self, data_dir, num_worker_threads, max_queue_size=500, sensor_settings=None):
        from omni.isaac.synthetic_utils import visualization as vis
        self.vis = vis
        # make sure queued frames are flushed when the interpreter exits
        atexit.register(self.stop_threads)
        self.data_dir = data_dir
        # Threading for multiple scenes
        self.num_worker_threads = num_worker_threads
        # Initialize queue with a specified size
        self.q = queue.Queue(max_queue_size)
        self.threads = []
        self._viewport = omni.kit.viewport.get_viewport_interface()
        self.create_output_folders(sensor_settings)

    def start_threads(self):
        """Start worker threads."""
        for _ in range(self.num_worker_threads):
            t = threading.Thread(target=self.worker, daemon=True)
            t.start()
            self.threads.append(t)

    def stop_threads(self):
        """Waits for all tasks to be completed before stopping worker threads."""
        print("Finish writing data...")
        # Block until all tasks are done
        self.q.join()
        # Stop workers: one sentinel (None) per worker thread
        for _ in range(self.num_worker_threads):
            self.q.put(None)
        for t in self.threads:
            t.join()
        print("Done.")

    def worker(self):
        """Processes tasks from the queue. Each task contains groundtruth data and
        metadata which is used to transform the output and write it to disk."""
        while True:
            groundtruth = self.q.get()
            if groundtruth is None:
                break
            filename = groundtruth["METADATA"]["image_id"]
            viewport_name = groundtruth["METADATA"]["viewport_name"]
            for gt_type, data in groundtruth["DATA"].items():
                if gt_type == "RGB":
                    self.save_image(viewport_name, gt_type, data, filename)
                elif gt_type == "DEPTH":
                    if groundtruth["METADATA"]["DEPTH"]["NPY"]:
                        self.depth_folder = self.data_dir + "/" + str(viewport_name) + "/depth/"
                        np.save(self.depth_folder + filename + ".npy", data)
                    if groundtruth["METADATA"]["DEPTH"]["COLORIZE"]:
                        self.save_image(viewport_name, gt_type, data, filename)
                elif gt_type == "INSTANCE":
                    self.save_segmentation(
                        viewport_name,
                        gt_type,
                        data,
                        filename,
                        groundtruth["METADATA"]["INSTANCE"]["WIDTH"],
                        groundtruth["METADATA"]["INSTANCE"]["HEIGHT"],
                        groundtruth["METADATA"]["INSTANCE"]["COLORIZE"],
                        groundtruth["METADATA"]["INSTANCE"]["NPY"],
                    )
                elif gt_type == "SEMANTIC":
                    self.save_segmentation(
                        viewport_name,
                        gt_type,
                        data,
                        filename,
                        groundtruth["METADATA"]["SEMANTIC"]["WIDTH"],
                        groundtruth["METADATA"]["SEMANTIC"]["HEIGHT"],
                        groundtruth["METADATA"]["SEMANTIC"]["COLORIZE"],
                        groundtruth["METADATA"]["SEMANTIC"]["NPY"],
                    )
                elif gt_type in ["BBOX2DTIGHT", "BBOX2DLOOSE"]:
                    self.save_bbox(
                        viewport_name,
                        gt_type,
                        data,
                        filename,
                        groundtruth["METADATA"][gt_type]["COLORIZE"],
                        groundtruth["DATA"]["RGB"],
                        groundtruth["METADATA"][gt_type]["NPY"],
                    )
                elif gt_type == "CAMERA":
                    self.camera_folder = self.data_dir + "/" + str(viewport_name) + "/camera/"
                    np.save(self.camera_folder + filename + ".npy", data)
                elif gt_type == "POSES":
                    self.poses_folder = self.data_dir + "/" + str(viewport_name) + "/poses/"
                    np.save(self.poses_folder + filename + ".npy", data)
                else:
                    raise NotImplementedError
            self.q.task_done()

    def save_segmentation(
        self, viewport_name, data_type, data, filename, width=1280, height=720, display_rgb=True, save_npy=True
    ):
        """Save instance/semantic segmentation as .npy and/or a colorized PNG."""
        self.instance_folder = self.data_dir + "/" + str(viewport_name) + "/instance/"
        self.semantic_folder = self.data_dir + "/" + str(viewport_name) + "/semantic/"
        # Save ground truth data locally as npy
        if data_type == "INSTANCE" and save_npy:
            np.save(self.instance_folder + filename + ".npy", data)
        if data_type == "SEMANTIC" and save_npy:
            np.save(self.semantic_folder + filename + ".npy", data)
        if display_rgb:
            image_data = np.frombuffer(data, dtype=np.uint8).reshape(*data.shape, -1)
            num_colors = 50 if data_type == "SEMANTIC" else None
            color_image = self.vis.colorize_segmentation(image_data, width, height, 3, num_colors)
            # color_image = visualize.colorize_instance(image_data)
            color_image_rgb = Image.fromarray(color_image, "RGB")
            # BUG FIX: the output name was a constant, so every frame overwrote
            # the previous one; use the frame's `filename` instead.
            if data_type == "INSTANCE":
                color_image_rgb.save(f"{self.instance_folder}/{filename}.png")
            if data_type == "SEMANTIC":
                color_image_rgb.save(f"{self.semantic_folder}/{filename}.png")

    def save_image(self, viewport_name, img_type, image_data, filename):
        """Save an RGB frame as PNG, or a depth frame as a normalized 8-bit PNG."""
        self.rgb_folder = self.data_dir + "/" + str(viewport_name) + "/rgb/"
        self.depth_folder = self.data_dir + "/" + str(viewport_name) + "/depth/"
        if img_type == "RGB":
            # Save ground truth data locally as png
            # BUG FIX: filename was a constant before (frames overwrote each other)
            rgb_img = Image.fromarray(image_data, "RGBA")
            rgb_img.save(f"{self.rgb_folder}/{filename}.png")
        elif img_type == "DEPTH":
            # Convert linear depth to inverse depth for better visualization
            image_data = image_data * 100
            image_data = np.reciprocal(image_data)
            # Save ground truth data locally as png
            image_data[image_data == 0.0] = 1e-5
            image_data = np.clip(image_data, 0, 255)
            # normalize to [0, 1] before converting to 8-bit
            image_data -= np.min(image_data)
            if np.max(image_data) > 0:
                image_data /= np.max(image_data)
            depth_img = Image.fromarray((image_data * 255.0).astype(np.uint8))
            depth_img.save(f"{self.depth_folder}/{filename}.png")

    def save_bbox(self, viewport_name, data_type, data, filename, display_rgb=True, rgb_data=None, save_npy=True):
        """Save 2-D bounding boxes as .npy and/or drawn over the RGB frame."""
        self.bbox_2d_tight_folder = self.data_dir + "/" + str(viewport_name) + "/bbox_2d_tight/"
        self.bbox_2d_loose_folder = self.data_dir + "/" + str(viewport_name) + "/bbox_2d_loose/"
        # Save ground truth data locally as npy
        if data_type == "BBOX2DTIGHT" and save_npy:
            np.save(self.bbox_2d_tight_folder + filename + ".npy", data)
        if data_type == "BBOX2DLOOSE" and save_npy:
            np.save(self.bbox_2d_loose_folder + filename + ".npy", data)
        if display_rgb and rgb_data is not None:
            color_image = self.vis.colorize_bboxes(data, rgb_data)
            color_image_rgb = Image.fromarray(color_image, "RGBA")
            # BUG FIX: use the frame's `filename` instead of a constant name
            if data_type == "BBOX2DTIGHT":
                color_image_rgb.save(f"{self.bbox_2d_tight_folder}/{filename}.png")
            if data_type == "BBOX2DLOOSE":
                color_image_rgb.save(f"{self.bbox_2d_loose_folder}/{filename}.png")

    def create_output_folders(self, sensor_settings=None):
        """Checks if the sensor output folder corresponding to each viewport is created. If not, it creates them."""
        import copy  # BUG FIX: copy.deepcopy was used below without importing copy
        if not os.path.exists(self.data_dir):
            os.mkdir(self.data_dir)
        if sensor_settings is None:
            # default: enable every sensor for every active viewport
            sensor_settings = dict()
            viewports = self._viewport.get_instance_list()
            viewport_names = [self._viewport.get_viewport_window_name(vp) for vp in viewports]
            sensor_settings_viewport = {
                "rgb": {"enabled": True},
                "depth": {"enabled": True, "colorize": True, "npy": True},
                "instance": {"enabled": True, "colorize": True, "npy": True},
                "semantic": {"enabled": True, "colorize": True, "npy": True},
                "bbox_2d_tight": {"enabled": True, "colorize": True, "npy": True},
                "bbox_2d_loose": {"enabled": True, "colorize": True, "npy": True},
                "camera": {"enabled": True, "npy": True},
                "poses": {"enabled": True, "npy": True},
            }
            for name in viewport_names:
                sensor_settings[name] = copy.deepcopy(sensor_settings_viewport)
        for viewport_name in sensor_settings:
            viewport_folder = self.data_dir + "/" + str(viewport_name)
            if not os.path.exists(viewport_folder):
                os.mkdir(viewport_folder)
            for sensor_name in sensor_settings[viewport_name]:
                if sensor_settings[viewport_name][sensor_name]["enabled"]:
                    sensor_folder = self.data_dir + "/" + str(viewport_name) + "/" + str(sensor_name)
                    if not os.path.exists(sensor_folder):
                        os.mkdir(sensor_folder)
| 10,072 | Python | 47.196172 | 150 | 0.552621 |
try:
    # prefer setuptools; fall back to distutils on very old environments
    from setuptools import setup
except ImportError:
    from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
import numpy

# Get the numpy include directory.
numpy_include_dir = numpy.get_include()

# Extensions
# mcubes (marching cubes algorithm)
mcubes_module = Extension(
    'src.utils.libmcubes.mcubes',
    sources=[
        'src/utils/libmcubes/mcubes.pyx',
        'src/utils/libmcubes/pywrapper.cpp',
        'src/utils/libmcubes/marchingcubes.cpp'
    ],
    language='c++',
    extra_compile_args=['-std=c++11'],
    include_dirs=[numpy_include_dir]
)

# mise (efficient mesh extraction)
mise_module = Extension(
    'src.utils.libmise.mise',
    sources=[
        'src/utils/libmise/mise.pyx'
    ],
)

# simplify (efficient mesh simplification)
simplify_mesh_module = Extension(
    'src.utils.libsimplify.simplify_mesh',
    sources=[
        'src/utils/libsimplify/simplify_mesh.pyx'
    ],
    include_dirs=[numpy_include_dir]
)

# Gather all extension modules
ext_modules = [
    mcubes_module,
    mise_module,
    simplify_mesh_module,
]

# torch's BuildExtension drives compilation of the cythonized modules
setup(
    ext_modules=cythonize(ext_modules),
    cmdclass={
        'build_ext': BuildExtension
    }
)
| 1,311 | Python | 21.237288 | 81 | 0.691076 |
NVlabs/ACID/ACID/plush_train.py | import torch
import torch.optim as optim
from tensorboardX import SummaryWriter
import matplotlib; matplotlib.use('Agg')
import numpy as np
import os
import argparse
import time, datetime
from src import config, data
from src.checkpoints import CheckpointIO
from collections import defaultdict
import shutil
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from src.utils import common_util
import matplotlib.pyplot as plt
from PIL import Image
# Arguments
parser = argparse.ArgumentParser(
    description='Train a Plush Env dynamics model.'
)
parser.add_argument('config', type=str, help='Path to config file.')
parser.add_argument('--no-cuda', action='store_true', help='Do not use cuda.')
parser.add_argument('--exit-after', type=int, default=-1,
                    help='Checkpoint and exit after specified number of seconds'
                         'with exit code 2.')
parser.add_argument('--debug', action='store_true', help='debugging')
parser.add_argument('--eval_only', action='store_true', help='run eval only')
args = parser.parse_args()
cfg = config.load_config(args.config, 'configs/default.yaml')
is_cuda = (torch.cuda.is_available() and not args.no_cuda)
device = torch.device("cuda" if is_cuda else "cpu")
# Set t0
t0 = time.time()
# Shorthands
out_dir = cfg['training']['out_dir']
if args.debug:
    # debug mode: shrink batches and run every periodic action each iteration
    cfg['training']['batch_size'] = 2
    cfg['training']['vis_n_outputs'] = 1
    cfg['training']['print_every'] = 1
    cfg['training']['backup_every'] = 1
    cfg['training']['validate_every'] = 1
    cfg['training']['visualize_every'] = 1
    cfg['training']['checkpoint_every'] = 1
    cfg['training']['visualize_total'] = 1
batch_size = cfg['training']['batch_size']
backup_every = cfg['training']['backup_every']
vis_n_outputs = cfg['generation']['vis_n_outputs']
exit_after = args.exit_after
# model selection: sign makes "bigger is better" uniform for both modes
model_selection_metric = cfg['training']['model_selection_metric']
if cfg['training']['model_selection_mode'] == 'maximize':
    model_selection_sign = 1
elif cfg['training']['model_selection_mode'] == 'minimize':
    model_selection_sign = -1
else:
    raise ValueError('model_selection_mode must be '
                     'either maximize or minimize.')
# Output directory
if not os.path.exists(out_dir):
    os.makedirs(out_dir)
# keep a copy of the config used for this run
shutil.copyfile(args.config, os.path.join(out_dir, 'config.yaml'))
# Dataset
train_loader = data.core.get_plush_loader(cfg, cfg['model']['type'], split='train')
val_loader = data.core.get_plush_loader(cfg, cfg['model']['type'], split='test')
# Model
model = config.get_model(cfg, device=device)
# Generator
generator = config.get_generator(model, cfg, device=device)
# Intialize training
optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-4)
trainer = config.get_trainer(model, optimizer, cfg, device=device)
checkpoint_io = CheckpointIO(out_dir, model=model, optimizer=optimizer)
try:
    load_dict = checkpoint_io.load('model_best.pt')
except FileExistsError:
    # CheckpointIO signals a missing checkpoint with FileExistsError;
    # start from scratch in that case
    load_dict = dict()
epoch_it = load_dict.get('epoch_it', 0)
it = load_dict.get('it', 0)
metric_val_best = load_dict.get(
    'loss_val_best', -model_selection_sign * np.inf)
# guard against a stale/invalid stored best value
if metric_val_best == np.inf or metric_val_best == -np.inf:
    metric_val_best = -model_selection_sign * np.inf
print('Current best validation metric (%s): %.8f'
      % (model_selection_metric, metric_val_best))
logger = SummaryWriter(os.path.join(out_dir, 'logs'))
# Shorthands
print_every = cfg['training']['print_every']
checkpoint_every = cfg['training']['checkpoint_every']
validate_every = cfg['training']['validate_every']
visualize_every = cfg['training']['visualize_every']
# Print model
nparameters = sum(p.numel() for p in model.parameters())
print('Total number of parameters: %d' % nparameters)
print('output path: ', cfg['training']['out_dir'])
# For visualizations
data_vis_list = []
if cfg['model']['type'] == 'geom':
    vis_dataset = data.core.get_geom_dataset(cfg, split='vis')
elif cfg['model']['type'] == 'combined':
    vis_dataset = data.core.get_combined_dataset(cfg, split='vis')
# Build a data dictionary for visualization (fixed seed for reproducible picks)
np.random.seed(0)
data_idxes = np.random.randint(len(vis_dataset), size=cfg['training']['visualize_total'])
for i, id in enumerate(data_idxes):
    data_vis = data.core.collate_pair_fn([vis_dataset[id]])
    data_vis_list.append({'it': i, 'data': data_vis})
if args.eval_only:
    # Evaluation-only mode: run validation once, dump metrics, figures and
    # per-sample meshes/point clouds, then exit.
    eval_dict, figs = trainer.evaluate(val_loader)
    metric_val = eval_dict[model_selection_metric]
    for k, v in eval_dict.items():
        print(f"metric {k}: {v}")
    print('Validation metric (%s): %.4f'
          % (model_selection_metric, metric_val))
    for k, v in figs.items():
        fig_path = os.path.join(out_dir, 'vis', f"{k}_eval_best.png")
        v.savefig(fig_path)
    for data_vis in data_vis_list:
        out = generator.generate_mesh(data_vis['data'])
        # Get statistics
        try:
            mesh, stats_dict = out
        except TypeError:
            # generator may return a bare mesh without statistics
            mesh, stats_dict = out, {}
        mesh.export(os.path.join(out_dir, 'vis', f"best_{data_vis['it']}.off"))
        out2 = generator.generate_pointcloud(data_vis['data'])
        for i, pcloud in enumerate(out2):
            ipath = os.path.join(out_dir, 'vis', f"best_{data_vis['it']}_{i}.obj")
            common_util.write_pointcoud_as_obj(ipath, pcloud)
        # columns beyond xyz (if present) are treated as per-point colors
        pcloud_dict = [{"title": 'source' if i == 0 else 'target',
                        "pts": p[:, :3],
                        "col": None if p.shape[1] == 3 else p[:, 3:]
                        } for i, p in enumerate(out2)]
        fig = common_util.side_by_side_point_clouds(pcloud_dict)
        width, height = fig.get_size_inches() * fig.get_dpi()
        # rasterize the matplotlib figure into an image file
        canvas = FigureCanvas(fig)
        canvas.draw()
        img_path = os.path.join(out_dir, 'vis', f"best_{data_vis['it']}.png")
        Image.fromarray(
            np.frombuffer(
                canvas.tostring_rgb(),
                dtype='uint8').reshape(int(height), int(width), 3)).save(
            img_path
        )
        plt.close(fig)
    quit()
# Main training loop: runs until the optional time limit triggers exit(3).
while True:
    epoch_it += 1
    for batch in train_loader:
        it += 1
        losses = trainer.train_step(batch, it)
        for k, v in losses.items():
            logger.add_scalar(f'train/{k}_loss', v, it)
        # Print output
        if (it % print_every) == 0:
            t = datetime.datetime.now()
            print_str = f"[Epoch {epoch_it:04d}] it={it:04d}, time: {time.time()-t0:.3f}, "
            print_str += f"{t.hour:02d}:{t.minute:02d}, "
            for k, v in losses.items():
                print_str += f"{k}:{v:.4f}, "
            print(print_str)
        # Save checkpoint
        if (checkpoint_every > 0 and (it % checkpoint_every) == 0):
            print('Saving checkpoint')
            checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it,
                               loss_val_best=metric_val_best)
        # Backup if necessary
        if (backup_every > 0 and (it % backup_every) == 0):
            print('Backup checkpoint')
            checkpoint_io.save('model_%d.pt' % it, epoch_it=epoch_it, it=it,
                               loss_val_best=metric_val_best)
        # Run validation
        if validate_every > 0 and (it % validate_every) == 0:
            print('Running Validation')
            eval_dict, figs = trainer.evaluate(val_loader)
            for k, v in figs.items():
                fig_path = os.path.join(out_dir, 'vis', f"{k}_{it}.png")
                v.savefig(fig_path)
                logger.add_figure(k, v, it)
            metric_val = eval_dict[model_selection_metric]
            print('Validation metric (%s): %.4f'
                  % (model_selection_metric, metric_val))
            for k, v in eval_dict.items():
                print(f"metric {k}: {v}")
                logger.add_scalar('val/%s' % k, v, it)
            # keep the checkpoint that is best under the selection metric
            if model_selection_sign * (metric_val - metric_val_best) > 0:
                metric_val_best = metric_val
                print('New best model (loss %.4f)' % metric_val_best)
                checkpoint_io.save('model_best.pt', epoch_it=epoch_it, it=it,
                                   loss_val_best=metric_val_best)
        # Visualize output
        if visualize_every > 0 and (it % visualize_every) == 0:
            print('Visualizing')
            renders = []
            for data_vis in data_vis_list:
                out = generator.generate_mesh(data_vis['data'])
                # Get statistics
                try:
                    mesh, stats_dict = out
                except TypeError:
                    # generator may return a bare mesh without statistics
                    mesh, stats_dict = out, {}
                mesh.export(os.path.join(out_dir, 'vis', '{}_{}.off'.format(it, data_vis['it'])))
                out2 = generator.generate_pointcloud(data_vis['data'])
                for i, pcloud in enumerate(out2):
                    ipath = os.path.join(out_dir, 'vis', f"{it}_{data_vis['it']}_{i}.obj")
                    common_util.write_pointcoud_as_obj(ipath, pcloud)
                name_dict = ['source', 'target', 'source_rollout', 'target_rollout']
                # columns beyond xyz (if present) are treated as per-point colors
                pcloud_dict = [{"title": name_dict[i],
                                "pts": p[:, :3],
                                "col": None if p.shape[1] == 3 else p[:, 3:]
                                } for i, p in enumerate(out2)]
                fig = common_util.side_by_side_point_clouds(pcloud_dict)
                width, height = fig.get_size_inches() * fig.get_dpi()
                canvas = FigureCanvas(fig)
                canvas.draw()
                img_path = os.path.join(out_dir, 'vis', f"{it}_{data_vis['it']}.png")
                Image.fromarray(
                    np.frombuffer(
                        canvas.tostring_rgb(),
                        dtype='uint8').reshape(int(height), int(width), 3)).save(
                    img_path
                )
                plt.close(fig)
        # Exit if necessary
        if exit_after > 0 and (time.time() - t0) >= exit_after:
            print('Time limit reached. Exiting.')
            checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it,
                               loss_val_best=metric_val_best)
            exit(3)
| 10,307 | Python | 39.108949 | 116 | 0.573979 |
NVlabs/ACID/ACID/src/training.py | import numpy as np
from collections import defaultdict
from tqdm import tqdm
class BaseTrainer(object):
    """Abstract base class for trainers.

    Subclasses implement `train_step`, `eval_step` and `visualize`;
    `evaluate` aggregates per-batch metrics over a whole data loader.
    """

    def evaluate(self, val_loader):
        """Run `eval_step` on every batch and average each metric.

        Args:
            val_loader (dataloader): pytorch dataloader

        Returns:
            dict: metric name -> mean value over all batches
        """
        per_metric = defaultdict(list)
        for batch in tqdm(val_loader):
            for name, value in self.eval_step(batch).items():
                per_metric[name].append(value)
        return {name: np.mean(values) for name, values in per_metric.items()}

    def train_step(self, *args, **kwargs):
        """Perform a single training step (must be overridden)."""
        raise NotImplementedError

    def eval_step(self, *args, **kwargs):
        """Perform a single evaluation step (must be overridden)."""
        raise NotImplementedError

    def visualize(self, *args, **kwargs):
        """Produce visualizations (must be overridden)."""
        raise NotImplementedError
| 988 | Python | 23.724999 | 65 | 0.571862 |
NVlabs/ACID/ACID/src/common.py | # import multiprocessing
import torch
import numpy as np
import math
import numpy as np
def compute_iou(occ1, occ2):
    ''' Computes the Intersection over Union (IoU) value for two sets of
    occupancy values.

    Args:
        occ1 (tensor): first set of occupancy values
        occ2 (tensor): second set of occupancy values
    '''
    occ1 = np.asarray(occ1)
    occ2 = np.asarray(occ2)
    # Flatten everything past the batch dimension so the IoU is computed
    # per batch element; 1-dimensional input is left as-is.
    if occ1.ndim >= 2:
        occ1 = occ1.reshape(occ1.shape[0], -1)
    if occ2.ndim >= 2:
        occ2 = occ2.reshape(occ2.shape[0], -1)
    # Binarize at the 0.5 threshold.
    occ1 = occ1 >= 0.5
    occ2 = occ2 >= 0.5
    # IoU = |intersection| / |union| along the last axis.
    union = (occ1 | occ2).astype(np.float32).sum(axis=-1)
    intersection = (occ1 & occ2).astype(np.float32).sum(axis=-1)
    return intersection / union
def chamfer_distance(points1, points2, give_id=False):
    ''' Returns the chamfer distance for the sets of points.

    Thin wrapper that always dispatches to the naive O(T^2)
    implementation.

    Args:
        points1 (tensor): first point set, shape (batch, T, 3)
        points2 (tensor): second point set, same shape as `points1`
        give_id (bool): whether to return the IDs of nearest points
            NOTE(review): this flag is currently ignored -- the naive
            implementation never returns nearest-neighbor IDs.
    '''
    return chamfer_distance_naive(points1, points2)
def chamfer_distance_naive(points1, points2):
    ''' Naive implementation of the Chamfer distance.

    Both point sets must have identical shape (batch, T, 3).

    Args:
        points1 (tensor): first point set
        points2 (tensor): second point set
    '''
    assert points1.size() == points2.size()
    batch_size, T, _ = points1.size()
    # Pairwise squared distances via broadcasting: (B, T, T).
    diff = points1.view(batch_size, T, 1, 3) - points2.view(batch_size, 1, T, 3)
    sq_dist = diff.pow(2).sum(-1)
    # Average nearest-neighbor distance in both directions.
    nn_12 = sq_dist.min(dim=1)[0].mean(dim=1)
    nn_21 = sq_dist.min(dim=2)[0].mean(dim=1)
    return nn_12 + nn_21
def make_3d_grid(bb_min, bb_max, shape):
    ''' Makes a 3D grid.

    Args:
        bb_min (tuple): bounding box minimum
        bb_max (tuple): bounding box maximum
        shape (tuple): output shape

    Returns:
        tensor: (shape[0]*shape[1]*shape[2], 3) grid coordinates, with
        the x axis varying slowest and the z axis fastest.
    '''
    num_pts = shape[0] * shape[1] * shape[2]
    axes = [torch.linspace(bb_min[i], bb_max[i], shape[i]) for i in range(3)]
    # Expand each 1D axis into the full grid, then flatten.
    xs = axes[0].view(-1, 1, 1).expand(*shape).contiguous().view(num_pts)
    ys = axes[1].view(1, -1, 1).expand(*shape).contiguous().view(num_pts)
    zs = axes[2].view(1, 1, -1).expand(*shape).contiguous().view(num_pts)
    return torch.stack([xs, ys, zs], dim=1)
def transform_points(points, transform):
    ''' Transforms points with regard to passed camera information.

    Args:
        points (tensor): points tensor of shape (B, N, 3)
        transform (tensor): (B, 3, 4) extrinsic matrices [R|t] or
            (B, 3, 3) camera matrices K

    Returns:
        tensor: transformed points of shape (B, N, 3)
    '''
    assert points.size(2) == 3
    assert transform.size(1) == 3
    assert points.size(0) == transform.size(0)
    if transform.size(2) == 4:
        # Affine transform: rotate, then translate.
        rot = transform[:, :, :3]
        trans = transform[:, :, 3:]
        points_out = points @ rot.transpose(1, 2) + trans.transpose(1, 2)
    elif transform.size(2) == 3:
        # Pure linear transform (e.g. camera intrinsics).
        points_out = points @ transform.transpose(1, 2)
    return points_out
def b_inv(b_mat):
    ''' Performs batch matrix inversion.

    Arguments:
        b_mat: the batch of square matrices that should be inverted,
            shape (B, N, N)

    Returns:
        tensor: batch of inverse matrices, same shape as the input
    '''
    # BUG FIX: the original implementation called `torch.gesv`, which
    # was deprecated and then removed from PyTorch.  `torch.linalg.inv`
    # computes the same batched inverse with the supported API.
    return torch.linalg.inv(b_mat)
def project_to_camera(points, transform):
    ''' Projects points to the camera plane.

    Args:
        points (tensor): points tensor of shape (B, N, 3)
        transform (tensor): transformation matrices

    Returns:
        tensor: (B, N, 2) coordinates after perspective division
    '''
    cam_pts = transform_points(points, transform)
    # Perspective divide: (x, y) / z.
    return cam_pts[..., :2] / cam_pts[..., 2:]
def fix_Rt_camera(Rt, loc, scale):
    ''' Fixes Rt camera matrix.

    Folds a world-space scale and translation into the extrinsic matrix.

    Args:
        Rt (tensor): (B, 3, 4) Rt camera matrix
        loc (tensor): (B, 3) location
        scale (tensor): (B,) scale

    Returns:
        tensor: adjusted (B, 3, 4) Rt matrix
    '''
    batch_size = Rt.size(0)
    R = Rt[:, :, :3]
    t = Rt[:, :, 3:]
    # Scale the rotation and shift the translation by R @ loc.
    R_scaled = R * scale.view(batch_size, 1, 1)
    t_shifted = t + R @ loc.unsqueeze(2)
    Rt_fixed = torch.cat([R_scaled, t_shifted], dim=2)
    assert Rt_fixed.size() == (batch_size, 3, 4)
    return Rt_fixed
def normalize_coordinate(p, padding=0.1, plane='xz'):
    ''' Normalize coordinate to [0, 1] for unit cube experiments

    Args:
        p (tensor): point, shape (B, N, 3)
        padding (float): conventional padding paramter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
        plane (str): plane feature type, ['xz', 'xy', 'yz']
    '''
    # Pick the two axes spanning the requested canonical plane
    # (anything other than 'xz'/'xy' falls back to 'yz').
    plane_axes = {'xz': [0, 2], 'xy': [0, 1]}.get(plane, [1, 2])
    xy = p[:, :, plane_axes]
    # Rescale from the padded cube to (-0.5, 0.5), then shift to (0, 1).
    xy_new = xy / (1 + padding + 10e-6)
    xy_new = xy_new + 0.5
    # Clamp outliers into the half-open range [0, 1).
    if xy_new.max() >= 1:
        xy_new[xy_new >= 1] = 1 - 10e-6
    if xy_new.min() < 0:
        xy_new[xy_new < 0] = 0.0
    return xy_new
def normalize_3d_coordinate(p, padding=0.1):
    ''' Normalize coordinate to [0, 1] for unit cube experiments.
    Corresponds to our 3D model

    Args:
        p (tensor): point
        padding (float): conventional padding paramter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
    '''
    # Rescale from the padded cube to (-0.5, 0.5), then shift to (0, 1).
    p_nor = p / (1 + padding + 10e-4) + 0.5
    # Clamp outliers into the half-open range [0, 1).
    if p_nor.max() >= 1:
        p_nor[p_nor >= 1] = 1 - 10e-4
    if p_nor.min() < 0:
        p_nor[p_nor < 0] = 0.0
    return p_nor
def normalize_coord(p, vol_range, plane='xz'):
    ''' Normalize coordinate to [0, 1] for sliding-window experiments

    NOTE: `p` is rescaled in place.

    Args:
        p (tensor): point
        vol_range (numpy array): volume boundary
        plane (str): feature type, ['xz', 'xy', 'yz'] - canonical planes; ['grid'] - grid volume
    '''
    # Rescale every axis by the volume extent (in place, as before).
    for axis in range(3):
        lo = vol_range[0][axis]
        hi = vol_range[1][axis]
        p[:, axis] = (p[:, axis] - lo) / (hi - lo)
    # Select the projection for the requested feature type.
    if plane == 'xz':
        return p[:, [0, 2]]
    elif plane == 'xy':
        return p[:, [0, 1]]
    elif plane == 'yz':
        return p[:, [1, 2]]
    return p
def coordinate2index(x, reso, coord_type='2d'):
    ''' Converts normalized coordinates into flattened grid indices.

    Args:
        x (tensor): coordinate in [0, 1], shape (B, N, 2) or (B, N, 3)
        reso (int): defined resolution
        coord_type (str): coordinate type, '2d' (plane) or '3d' (grid)

    Returns:
        tensor: (B, 1, N) long tensor of flattened cell indices
    '''
    cell = (x * reso).long()
    if coord_type == '2d':
        # Row-major index into a reso x reso plane.
        flat = cell[:, :, 0] + reso * cell[:, :, 1]
    elif coord_type == '3d':
        # Row-major index into a reso^3 grid.
        flat = cell[:, :, 0] + reso * (cell[:, :, 1] + reso * cell[:, :, 2])
    return flat[:, None, :]
def coord2index(p, vol_range, reso=None, plane='xz'):
    ''' Maps points in a sliding-window volume to flattened grid indices.

    Normalizes `p` into [0, 1] via `normalize_coord` (which also mutates
    `p` in place), discretizes at resolution `reso`, and flattens to a
    row-major index.

    Args:
        p (tensor): points
        vol_range (numpy array): volume boundary
        reso (int): defined resolution
        plane (str): feature type, ['xz', 'xy', 'yz'] - canonical planes; ['grid'] - grid volume
    '''
    # normalize to [0, 1]
    x = normalize_coord(p, vol_range, plane=plane)
    # Discretize; numpy and torch inputs need different casts.
    if isinstance(x, np.ndarray):
        x = np.floor(x * reso).astype(int)
    else: #* pytorch tensor
        x = (x * reso).long()
    if x.shape[1] == 2:
        index = x[:, 0] + reso * x[:, 1]
        # NOTE(review): out-of-range indices are clamped to reso**2, not
        # reso**2 - 1 -- verify the target feature buffer reserves an
        # extra overflow slot, otherwise this is an off-by-one.
        index[index > reso**2] = reso**2
    elif x.shape[1] == 3:
        index = x[:, 0] + reso * (x[:, 1] + reso * x[:, 2])
        # NOTE(review): same overflow-slot question as above (reso**3).
        index[index > reso**3] = reso**3
    return index[None]
def update_reso(reso, depth):
    ''' Rounds the resolution up so that the U-Net can process it.

    The resolution must be divisible by 2**(depth - 1) for a U-Net with
    `depth` levels of downsampling.

    Args:
        reso (int): defined resolution
        depth (int): U-Net number of layers

    Returns:
        int: the smallest resolution >= `reso` divisible by the base
    '''
    base = 2**(int(depth) - 1)
    # BUG FIX: the original guard used the bitwise operator `~` on a
    # bool (`~True == -2`, `~False == -1`), both of which are truthy, so
    # the branch always ran.  `not` expresses the intended check.  (The
    # old behavior happened to be correct only because i == 0 matched
    # for already-divisible resolutions.)
    if not (reso / base).is_integer():
        for i in range(base):
            if ((reso + i) / base).is_integer():
                reso = reso + i
                break
    return reso
def decide_total_volume_range(query_vol_metric, recep_field, unit_size, unet_depth):
    ''' Computes input/query volume bounds and a U-Net-compatible resolution.

    Args:
        query_vol_metric (numpy array): query volume size
        recep_field (int): defined the receptive field for U-Net
        unit_size (float): the defined voxel size
        unet_depth (int): U-Net number of layers

    Returns:
        tuple: ([lb, ub] of input volume, [lb, ub] of query volume, reso)
    '''
    # Input volume must cover the query volume plus the receptive field.
    reso = query_vol_metric / unit_size + recep_field - 1
    reso = update_reso(int(reso), unet_depth) # make sure input reso can be processed by UNet
    input_vol_metric = reso * unit_size
    # Both volumes are centered at the origin.
    p_c = np.array([0.0, 0.0, 0.0]).astype(np.float32)
    lb_input_vol, ub_input_vol = p_c - input_vol_metric/2, p_c + input_vol_metric/2
    lb_query_vol, ub_query_vol = p_c - query_vol_metric/2, p_c + query_vol_metric/2
    input_vol = [lb_input_vol, ub_input_vol]
    query_vol = [lb_query_vol, ub_query_vol]
    # handle the case when resolution is too large
    # NOTE(review): the volume bounds above are still derived from the
    # original (large) reso when this fallback triggers -- confirm that
    # callers expect that combination.
    if reso > 10000:
        reso = 1
    return input_vol, query_vol, reso
def add_key(base, new, base_name, new_name, device=None):
    ''' Wraps an input and auxiliary info into a single dict.

    Args:
        base (tensor): inputs
        new (tensor): new info for the inputs
        base_name (str): name for the input
        new_name (str): name for the new info
        device (device): pytorch device

    Returns:
        `base` unchanged when `new` is not a dict, otherwise the dict
        {base_name: base, new_name: new}
    '''
    # isinstance(None, dict) is False, so a single check suffices.
    if isinstance(new, dict):
        # Optionally move every entry of the auxiliary dict to `device`.
        if device is not None:
            for key in new.keys():
                new[key] = new[key].to(device)
        base = {base_name: base, new_name: new}
    return base
class map2local(object):
    ''' Maps global coordinates into per-voxel local coordinates.

    Args:
        s (float): the defined voxel size
        pos_encoding (str): method for the positional encoding, linear|sin_cos
    '''
    def __init__(self, s, pos_encoding='linear'):
        super().__init__()
        self.s = s
        self.pe = positional_encoding(basis_function=pos_encoding)

    def __call__(self, p):
        # Wrap coordinates into [0, 1) within each voxel; torch.remainder
        # (unlike torch.fmod) always yields a non-negative result.
        local = torch.remainder(p, self.s) / self.s
        return self.pe(local)
class positional_encoding(object):
    ''' Positional Encoding (presented in NeRF)

    Args:
        basis_function (str): basis function; 'sin_cos' applies the NeRF
            sinusoidal basis, anything else passes the input through.
    '''
    def __init__(self, basis_function='sin_cos'):
        super().__init__()
        self.func = basis_function
        # 10 octaves of frequencies (2^0 .. 2^9), scaled by pi.
        num_octaves = 10
        octaves = 2.**(np.linspace(0, num_octaves - 1, num_octaves))
        self.freq_bands = octaves * math.pi

    def __call__(self, p):
        if self.func != 'sin_cos':
            return p
        # Map p from [0, 1] to [-1, 1] before applying the basis.
        p = 2.0 * p - 1.0
        features = []
        for freq in self.freq_bands:
            features.append(torch.sin(freq * p))
            features.append(torch.cos(freq * p))
        return torch.cat(features, dim=2)
| 11,186 | Python | 29.399456 | 109 | 0.562846 |
NVlabs/ACID/ACID/src/config.py | import yaml
from torchvision import transforms
from src import data
from src import conv_onet
method_dict = {
'conv_onet': conv_onet
}
# General config
def load_config(path, default_path=None):
    ''' Loads config file.

    Supports single-level inheritance via an `inherit_from` key: the
    parent config is loaded first and then updated with this file's
    entries.

    Args:
        path (str): path to config file
        default_path (str): optional path of a fallback default config

    Returns:
        dict: the merged configuration
    '''
    # Load configuration from file itself
    with open(path, 'r') as f:
        # BUG FIX: `yaml.load` without an explicit Loader has been
        # deprecated since PyYAML 5.1 and is a TypeError in PyYAML 6;
        # FullLoader matches the old default for trusted config files.
        cfg_special = yaml.load(f, Loader=yaml.FullLoader)
    # Check if we should inherit from a config
    inherit_from = cfg_special.get('inherit_from')
    # If yes, load this config first as default
    # If no, use the default_path
    if inherit_from is not None:
        cfg = load_config(inherit_from, default_path)
    elif default_path is not None:
        with open(default_path, 'r') as f:
            cfg = yaml.load(f, Loader=yaml.FullLoader)
    else:
        cfg = dict()
    # Include main configuration
    update_recursive(cfg, cfg_special)
    return cfg
def update_recursive(dict1, dict2):
    ''' Update two config dictionaries recursively.

    Args:
        dict1 (dict): first dictionary to be updated
        dict2 (dict): second dictionary which entries should be used
    '''
    for key, value in dict2.items():
        # Create a sub-dict for keys dict1 does not know about yet.
        if key not in dict1:
            dict1[key] = dict()
        if not isinstance(value, dict):
            dict1[key] = value
        else:
            update_recursive(dict1[key], value)
# Models
def get_model(cfg, device=None, dataset=None):
    ''' Returns the model instance.

    Dispatches to the config module of the method named in
    cfg['method'].

    Args:
        cfg (dict): config dictionary
        device (device): pytorch device
        dataset (dataset): dataset
    '''
    method_module = method_dict[cfg['method']]
    return method_module.config.get_model(cfg, device=device, dataset=dataset)
# Trainer
def get_trainer(model, optimizer, cfg, device):
    ''' Returns a trainer instance.

    Dispatches to the config module of the method named in
    cfg['method'].

    Args:
        model (nn.Module): the model which is used
        optimizer (optimizer): pytorch optimizer
        cfg (dict): config dictionary
        device (device): pytorch device
    '''
    method_module = method_dict[cfg['method']]
    return method_module.config.get_trainer(model, optimizer, cfg, device)
# Generator for final mesh extraction
def get_generator(model, cfg, device):
    ''' Returns a generator instance.

    Dispatches to the config module of the method named in
    cfg['method'].

    Args:
        model (nn.Module): Occupancy Network model
        cfg (dict): config dictionary
        device (device): pytorch device
    '''
    method_module = method_dict[cfg['method']]
    return method_module.config.get_generator(model, cfg, device)
| 2,573 | Python | 23.990291 | 76 | 0.624563 |
NVlabs/ACID/ACID/src/checkpoints.py | import os
import urllib
import torch
from torch.utils import model_zoo
class CheckpointIO(object):
    ''' CheckpointIO class.

    It handles saving and loading checkpoints for a set of registered
    modules (anything exposing state_dict / load_state_dict).

    Args:
        checkpoint_dir (str): path where checkpoints are saved
        **kwargs: name -> module pairs to register immediately
    '''
    def __init__(self, checkpoint_dir='./chkpts', **kwargs):
        self.module_dict = kwargs
        self.checkpoint_dir = checkpoint_dir
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
    def register_modules(self, **kwargs):
        ''' Registers modules in current module dictionary.
        '''
        self.module_dict.update(kwargs)
    def save(self, filename, **kwargs):
        ''' Saves the current module dictionary.

        Extra keyword arguments (e.g. epoch_it, it, loss_val_best) are
        stored alongside the module state dicts as scalars.

        Args:
            filename (str): name of output file
        '''
        # Relative paths are resolved against the checkpoint directory.
        if not os.path.isabs(filename):
            filename = os.path.join(self.checkpoint_dir, filename)
        outdict = kwargs
        for k, v in self.module_dict.items():
            outdict[k] = v.state_dict()
        torch.save(outdict, filename)
    def load(self, filename):
        '''Loads a module dictionary from local file or url.

        Args:
            filename (str): name of saved module dictionary

        Returns:
            dict: scalar entries that were saved next to the modules
        '''
        if is_url(filename):
            return self.load_url(filename)
        else:
            return self.load_file(filename)
    def load_file(self, filename):
        '''Loads a module dictionary from file.

        Args:
            filename (str): name of saved module dictionary
        '''
        if not os.path.isabs(filename):
            filename = os.path.join(self.checkpoint_dir, filename)
        if os.path.exists(filename):
            print(filename)
            print('=> Loading checkpoint from local file...')
            state_dict = torch.load(filename)
            scalars = self.parse_state_dict(state_dict)
            return scalars
        else:
            # NOTE(review): FileNotFoundError would be the semantically
            # correct exception for a missing file; kept as-is because
            # callers may already catch FileExistsError.
            raise FileExistsError
    def load_url(self, url):
        '''Load a module dictionary from url.

        Args:
            url (str): url to saved model
        '''
        print(url)
        print('=> Loading checkpoint from url...')
        state_dict = model_zoo.load_url(url, progress=True)
        scalars = self.parse_state_dict(state_dict)
        return scalars
    def parse_state_dict(self, state_dict):
        '''Parse state_dict of model and return scalars.

        Loads every registered module's weights from `state_dict`
        (warning on missing entries) and returns the remaining
        non-module entries.

        Args:
            state_dict (dict): State dict of model
        '''
        for k, v in self.module_dict.items():
            if k in state_dict:
                v.load_state_dict(state_dict[k])
            else:
                print('Warning: Could not find %s in checkpoint!' % k)
        # Everything that is not a registered module is a scalar to return.
        scalars = {k: v for k, v in state_dict.items()
                   if k not in self.module_dict}
        return scalars
def is_url(url):
    ''' Returns whether the given string looks like an http(s) URL. '''
    return urllib.parse.urlparse(url).scheme in ('http', 'https')
NVlabs/ACID/ACID/src/layers.py | import torch
import torch.nn as nn
# Resnet Blocks
class ResnetBlockFC(nn.Module):
    ''' Fully connected ResNet Block class.

    Computes x -> shortcut(x) + fc_1(relu(fc_0(relu(x)))).

    Args:
        size_in (int): input dimension
        size_out (int): output dimension
        size_h (int): hidden dimension
    '''
    def __init__(self, size_in, size_out=None, size_h=None):
        super().__init__()
        # Default the output to the input size and the hidden size to
        # the smaller of the two.
        size_out = size_in if size_out is None else size_out
        size_h = min(size_in, size_out) if size_h is None else size_h
        self.size_in = size_in
        self.size_h = size_h
        self.size_out = size_out
        # Submodules
        self.fc_0 = nn.Linear(size_in, size_h)
        self.fc_1 = nn.Linear(size_h, size_out)
        self.actvn = nn.ReLU()
        # A linear projection is only needed when dimensions differ.
        self.shortcut = (None if size_in == size_out
                         else nn.Linear(size_in, size_out, bias=False))
        # Zero-init the last layer's weight so the residual starts small.
        nn.init.zeros_(self.fc_1.weight)

    def forward(self, x):
        hidden = self.fc_0(self.actvn(x))
        dx = self.fc_1(self.actvn(hidden))
        x_s = x if self.shortcut is None else self.shortcut(x)
        return x_s + dx
NVlabs/ACID/ACID/src/conv_onet/training.py | import os
import numpy as np
import torch
from torch.nn import functional as F
from src.common import compute_iou
from src.utils import common_util, plushsim_util
from src.training import BaseTrainer
from sklearn.metrics import roc_curve
from scipy import interp
import matplotlib.pyplot as plt
from collections import defaultdict
from tqdm import tqdm
from src.utils.plushsim_util import find_nn_cpu, find_emd_cpu
class PlushTrainer(BaseTrainer):
    ''' Trainer object for the Occupancy Network.

    Args:
        model (nn.Module): Occupancy Network model
        optimizer (optimizer): pytorch optimizer object
        cfg (dict): full yaml config dictionary (reads the 'test',
            'training', 'model' and 'loss' sections)
        device (device): pytorch device
        vis_dir (str): visualization directory (created if missing)
    '''
    def __init__(self, model, optimizer, cfg, device=None, vis_dir=None, ):
        self.model = model
        self.optimizer = optimizer
        self.device = device
        self.vis_dir = vis_dir
        self.threshold = cfg['test']['threshold']
        # Positive-class weight for the occupancy BCE loss.
        self.pos_weight = torch.FloatTensor([cfg['training']['pos_weight']]).to(device)
        # Contrastive-correspondence options are only read when the
        # decoder actually produces correspondence features.
        if 'corr_dim' in cfg['model']['decoder_kwargs'] and cfg['model']['decoder_kwargs']['corr_dim'] > 0:
            self.contrastive_threshold = cfg['loss']['contrastive_threshold']
            self.use_geodesics = cfg['loss']['use_geodesics']
            self.loss_type = cfg['loss']['type']
            self.contrastive_coeff_neg = cfg['loss'].get('contrastive_coeff_neg', 1.)
            self.contrastive_neg_thres = cfg['loss'].get('contrastive_neg_thres', 1.)
            self.contrastive_coeff_pos = cfg['loss'].get('contrastive_coeff_pos', 1.)
            self.contrastive_pos_thres= cfg['loss'].get('contrastive_pos_thres', 0.1)
            self.scale_with_geodesics = cfg['loss'].get('scale_with_geodesics', False)
        if vis_dir is not None and not os.path.exists(vis_dir):
            os.makedirs(vis_dir)
        # Evaluation-curve discretization: FPR grid for ROC curves and
        # distance thresholds (up to max_thres) for FMR curves.
        self.max_thres = 0.2
        self.discretization = 1000
        self.base_fpr = np.linspace(0,1,101)
        self.base_thres = np.linspace(0,self.max_thres,self.discretization)
    def train_step(self, data, it):
        ''' Performs a training step.

        Args:
            data (dict): data dictionary
            it (int): current iteration (forwarded to compute_loss)

        Returns:
            dict: per-term loss values as python floats
        '''
        self.model.train()
        self.optimizer.zero_grad()
        losses = self.compute_loss(data, it)
        # Total loss is the unweighted sum of all individual terms.
        loss = 0
        for v in losses.values():
            loss += v
        loss.backward()
        self.optimizer.step()
        return {k:v.item() for k,v in losses.items()}
    def evaluate(self, val_loader):
        ''' Performs an evaluation.

        Unlike BaseTrainer.evaluate, also aggregates per-sample curves
        (ROC / FMR / pair distances) into matplotlib figures.

        Args:
            val_loader (dataloader): pytorch dataloader

        Returns:
            tuple: (dict of scalar metrics, dict of matplotlib figures)
        '''
        eval_list = defaultdict(list)
        agg_list = defaultdict(list)
        for data in tqdm(val_loader):
            eval_step_dict, agg_step_dict = self.eval_step(data)
            for k, v in eval_step_dict.items():
                eval_list[k].append(v)
            for k, v in agg_step_dict.items():
                agg_list[k].append(v)
        eval_dict = {k: np.mean(v) for k, v in eval_list.items()}
        # - shape completion ROC
        figs = {}
        if 'tpr' in agg_list:
            figs['OCC_ROC'] = self._get_shape_completion_ROC(agg_list['tpr'])
        if 'fmr_hits' in agg_list:
            fmr = np.array(agg_list['fmr_hits'])
            # Column indices of base_thres corresponding to 1/2/5/10 cm
            # inlier distance thresholds.
            idx01 = int(0.01 * (self.discretization-1) / self.max_thres)
            idx02 = int(0.02 * (self.discretization-1) / self.max_thres)
            idx05 = int(0.05 * (self.discretization-1) / self.max_thres)
            idx10 = int(0.10 * (self.discretization-1) / self.max_thres)
            # Feature-match recall at a 5% inlier-ratio threshold.
            eval_dict['FMR.01m_5%'] = np.mean(fmr[:,idx01] > 0.05)
            eval_dict['FMR.02m_5%'] = np.mean(fmr[:,idx02] > 0.05)
            eval_dict['FMR.05m_5%'] = np.mean(fmr[:,idx05] > 0.05)
            eval_dict['FMR.10m_5%'] = np.mean(fmr[:,idx10] > 0.05)
            fmr_std = fmr.std(axis=0)
            eval_dict['FMR.01m_5%_std'] = fmr_std[idx01]
            eval_dict['FMR.02m_5%_std'] = fmr_std[idx02]
            eval_dict['FMR.05m_5%_std'] = fmr_std[idx05]
            eval_dict['FMR.10m_5%_std'] = fmr_std[idx10]
            # FMR curves sweeping one threshold with the other fixed.
            for tau2 in np.linspace(0.01,0.2,5):
                figs[f'FMR_tau1_wrt_tau2={tau2:.3f}']= self._get_FMR_curve_tau1(fmr, tau2=tau2)
            figs['FMR_tau1']= self._get_FMR_curve_tau1(fmr)
            for tau1 in np.linspace(0.01,0.1,5):
                figs[f'FMR_tau2_wrt_tau1={tau1:.3f}']= self._get_FMR_curve_tau2(fmr, tau1=tau1)
        #ax.scatter(fpr, tpr, s=100, alpha=0.5, color="blue")
        if 'pair_dist' in agg_list:
            all_dists = np.concatenate(agg_list['pair_dist'])
            eval_dict['pair_dist'] = all_dists.mean()
            eval_dict['pair_dist_std'] = all_dists.std()
            figs['dist_hist'] = self._get_pair_distance_histogram(all_dists)
        return eval_dict, figs
    def _get_pair_distance_histogram(self, all_dists):
        ''' Builds a density histogram of correspondence pair distances. '''
        fig, ax = plt.subplots(figsize=(10,7))
        counts, bins, patches = ax.hist(all_dists, density=True, bins=40) # density=False would make counts
        ax.set_ylabel('Density')
        ax.set_xlabel('Pair Distance')
        return fig
    def _get_shape_completion_ROC(self, tpr):
        ''' Plots the mean occupancy ROC curve with a +/- std band.

        Args:
            tpr (list): per-sample TPR vectors sampled on self.base_fpr
        '''
        tprs = np.array(tpr)
        mean_tprs = tprs.mean(axis=0)
        std = tprs.std(axis=0)
        tprs_upper = np.minimum(mean_tprs + std, 1)
        tprs_lower = np.maximum(mean_tprs - std, 0)
        fig, ax = plt.subplots(figsize=(10,7))
        ax.plot(self.base_fpr, mean_tprs, 'b')
        ax.fill_between(self.base_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.3)
        # Diagonal chance line.
        ax.plot([0, 1], [0, 1],'r--')
        ax.set_xlim([0.0, 1.0])
        ax.set_ylim([0.0, 1.0])
        ax.set_ylabel('True Positive Rate')
        ax.set_xlabel('False Positive Rate')
        return fig
    def _get_FMR_curve_tau2(self, fmrs, tau1=0.1):
        ''' FMR as a function of the inlier-ratio threshold (tau2),
        with the inlier distance threshold tau1 fixed. '''
        idx05 = int(tau1 * (self.discretization-1) / self.max_thres)
        # fix tau 1
        means = []
        tau1_min = 0.001
        tau1_max = 0.25
        tau1_ticks = np.linspace(tau1_min, tau1_max, 1000)
        for t in tau1_ticks:
            means.append(np.mean(fmrs[:,idx05] > t, axis=0))
        fig, ax = plt.subplots(figsize=(10,7))
        ax.plot(tau1_ticks, means, 'b')
        ax.set_xlim([tau1_min, tau1_max])
        ax.set_ylim([0.0, 1.0])
        ax.set_ylabel('Feature Match Recall')
        ax.set_xlabel('Inlier Ratio threshold')
        return fig
    def _get_FMR_curve_tau1(self, fmrs, tau2=0.05):
        ''' FMR as a function of the inlier distance threshold (tau1),
        with the inlier-ratio threshold tau2 fixed. '''
        # tau2 = 0.05 is the inlier ratio
        # fix tau 2
        mean_fmrs = np.mean(fmrs > tau2, axis=0)
        fig, ax = plt.subplots(figsize=(10,7))
        ax.plot(self.base_thres, mean_fmrs, 'b')
        ax.set_xlim([0.0, self.max_thres])
        ax.set_ylim([0.0, 1.0])
        ax.set_ylabel('Feature Match Recall')
        ax.set_xlabel('Inlier Distance Threshold')
        return fig
    def eval_step(self, data):
        ''' Performs an evaluation step.

        Args:
            data (dict): data dictionary

        Returns:
            tuple: (dict of scalar metrics, dict of per-sample curve data)
        '''
        self.model.eval()
        device = self.device
        for k,v in data.items():
            data[k] = v.to(device)
        eval_dict = {}
        agg = {}
        idx = data['idx'].item()
        # Compute iou
        with torch.no_grad():
            outputs = self.model(data)
        # Source/target pairs are stacked, hence the B*2 reshape below.
        gt_occ = data['sampled_occ']
        B,_,N = gt_occ.shape
        gt_occ = gt_occ.reshape([B*2, N])
        occ_iou_np = (gt_occ >= 0.5).cpu().numpy()
        occ_iou_hat_np = (outputs['occ'].probs >= self.threshold).cpu().numpy()
        iou = compute_iou(occ_iou_np, occ_iou_hat_np).mean()
        eval_dict['iou'] = iou
        eval_dict[f'iou_{self.threshold}'] = iou
        # Also report IoU at 0.5 and at the midpoint threshold.
        occ_iou_hat_np_2 = (outputs['occ'].probs >= 0.5).cpu().numpy()
        iou = compute_iou(occ_iou_np, occ_iou_hat_np_2).mean()
        eval_dict['iou_0.5'] = iou
        intermediate = (self.threshold + 0.5) / 2
        occ_iou_hat_np_3 = (outputs['occ'].probs >= intermediate).cpu().numpy()
        iou = compute_iou(occ_iou_np, occ_iou_hat_np_3).mean()
        eval_dict[f'iou_{intermediate}'] = iou
        if 'flow' in outputs:
            gt_flow = data['sampled_flow']
            gt_flow = gt_flow.reshape([B*2, N, 3])
            # Per-axis scaling from normalized to metric units.
            # NOTE(review): hard-coded .cuda() assumes a GPU device even
            # though self.device is configurable -- confirm intended.
            constant = torch.from_numpy(np.array((12.,12.,4.)) / 10. / (1.1,1.1,1.1)).float().cuda()
            loss_flow = F.mse_loss(
                outputs['flow'] * constant,
                gt_flow * constant,
                reduction='none')
            eval_dict['flow_all_field'] = loss_flow.sum(-1).mean().item()
            loss_flow_np = loss_flow.sum(-1).cpu().numpy()
            loss_flow_pos = loss_flow_np[occ_iou_np]
            # if empty scene, no flow of the object will be present
            if len(loss_flow_pos) > 0:
                eval_dict['flow'] = loss_flow_pos.mean()
        gt_pts = data['sampled_pts'].reshape([B*2, N, 3]).cpu().numpy()
        if 'flow' in outputs:
            # Flow error restricted to camera-visible occupied points.
            flow_vis_mean = []
            for i in range(B*2):
                gt_occ_pts = gt_pts[i][occ_iou_np[i]] * (1200, 1200, 400) / (1.1,1.1,1.1) + (0,0,180)
                vis_idx = plushsim_util.render_points(gt_occ_pts,
                                                      plushsim_util.CAM_EXTR,
                                                      plushsim_util.CAM_INTR,
                                                      return_index=True)
                vis_pts = gt_occ_pts[vis_idx]
                flow_vis_mean.append(loss_flow_np[i][occ_iou_np[i]][vis_idx].mean())
            eval_dict['flow_only_vis'] = np.mean(flow_vis_mean)
        # Expensive metrics only every 10000th sample.
        if idx % 10000 == 9999:
            # do expensive evaluations
            # occupancy ROC curve
            fpr, tpr, _ = roc_curve(occ_iou_np.flatten(),
                                    outputs['occ'].probs.cpu().numpy().flatten())
            base_fpr = np.linspace(0, 1, 101)
            # NOTE(review): `scipy.interp` was removed in SciPy >= 1.6;
            # np.interp is the drop-in replacement -- confirm the pinned
            # scipy version still exports it.
            tpr = interp(base_fpr, fpr, tpr)
            tpr[0] = 0.0
            agg['tpr'] = tpr
            # F1 / precision / recall on subsampled point clouds.
            f1 = []
            for i in range(B*2):
                gt_occ_pts = common_util.subsample_points(gt_pts[i][occ_iou_np[i]], return_index=False)
                pred_pts = common_util.subsample_points(gt_pts[i][occ_iou_hat_np[i]], return_index=False)
                f1.append(common_util.f1_score(pred_pts, gt_occ_pts))
            f1 = np.array(f1)
            f1score, precision, recall = f1.mean(axis=0)
            eval_dict['f1'] = f1score
            eval_dict['precision'] = precision
            eval_dict['recall'] = recall
        if 'corr' in outputs:
            # data prep corr
            corr_f = outputs['corr']
            num_pairs = corr_f.shape[1]
            # Ground-truth matching is the identity permutation.
            gt_match = np.arange(num_pairs)
            src_f = corr_f[0].cpu().numpy()
            tgt_f = corr_f[1].cpu().numpy()
            # data prep pts
            pts = data['sampled_pts'].cpu().numpy().squeeze()
            src_pts = pts[0][:num_pairs] * (12,12,4) / (1.1,1.1,1.1)
            tgt_pts = pts[1][:num_pairs] * (12,12,4) / (1.1,1.1,1.1)
            # normalize points to maximum length of 1.
            tgt_pts = tgt_pts / np.ptp(tgt_pts, axis=0).max()
            # Match features via earth-mover assignment, then score by
            # distance between matched and ground-truth target points.
            _, nn_inds_st = find_emd_cpu(src_f, tgt_f)
            # doing Feature-match recall.
            eval_dict['match_exact'] = np.mean(gt_match == nn_inds_st)
            dist_st = np.linalg.norm(tgt_pts - tgt_pts[nn_inds_st], axis=1)
            eval_dict['match_0.05'] = np.mean(dist_st < 0.05)
            eval_dict['match_0.1'] = np.mean(dist_st < 0.1)
            hits = np.array([np.mean(dist_st < f) for f in self.base_thres])
            agg['fmr_hits'] = hits
            agg['pair_dist'] = dist_st
        return eval_dict, agg
    def compute_loss(self, data, it):
        ''' Computes the loss.

        Args:
            data (dict): data dictionary
            it (int): current iteration (unused here)

        Returns:
            dict: loss name -> loss tensor (occ / flow / contrastive_*)
        '''
        device = self.device
        for k,v in data.items():
            data[k] = v.to(device)
        outputs = self.model(data)
        loss = {}
        # NOTE(review): eval_dict is populated below but never returned
        # or stored -- the IoU computed here is effectively dead code.
        eval_dict = {}
        # Occupancy Loss
        if 'occ' in outputs:
            # gt points
            gt_occ = data['sampled_occ']
            B,_,N = gt_occ.shape
            gt_occ = gt_occ.reshape([B*2, N])
            occ_iou_np = (gt_occ >= 0.5).cpu().numpy()
            # pred
            logits = outputs['occ'].logits
            loss_i = F.binary_cross_entropy_with_logits(
                logits, gt_occ, reduction='none', pos_weight=self.pos_weight)
            loss['occ'] = loss_i.mean()
            # eval infos
            occ_iou_hat_np = (outputs['occ'].probs >= self.threshold).cpu().numpy()
            iou = compute_iou(occ_iou_np, occ_iou_hat_np).mean()
            eval_dict['iou'] = iou
        if 'flow' in outputs :
            gt_occ = data['sampled_occ']
            B,_,N = gt_occ.shape
            gt_occ = gt_occ.reshape([B*2, N])
            mask = (gt_occ > 0.5).bool()
            gt_flow = data['sampled_flow']
            gt_flow = gt_flow.reshape([B*2, N, 3])
            # Split flow supervision into free-space (0) / occupied (1);
            # free-space flow is down-weighted by 0.01 below.
            flow_gt_0 = gt_flow[~mask]
            flow_gt_1 = gt_flow[mask]
            flow_pred = outputs['flow']
            flow_pred_0 = flow_pred[~mask]
            flow_pred_1 = flow_pred[mask]
            loss['flow'] = F.mse_loss(flow_pred_1, flow_gt_1) + 0.01 * F.mse_loss(flow_pred_0, flow_gt_0)
        if 'corr' in outputs:
            # Geodesic distances decide positive vs. negative pairs.
            dist_vec = data['geo_dists']
            corr_f = outputs['corr']
            src_f = corr_f[0]
            src_pos = src_f[dist_vec <= self.contrastive_threshold]
            num_positive = (dist_vec <= self.contrastive_threshold).sum()
            tgt_f = corr_f[1]
            tgt_pos = tgt_f[dist_vec <= self.contrastive_threshold]
            if self.loss_type == "contrastive":
                if num_positive > 0:
                    src_neg = src_f[dist_vec > self.contrastive_threshold]
                    tgt_neg = tgt_f[dist_vec > self.contrastive_threshold]
                    # Positive loss
                    # Hinge on feature distance above the positive margin
                    # (1e-4 stabilizes the sqrt gradient at 0).
                    pos_loss = F.relu(((src_pos - tgt_pos).pow(2).sum(1) + 1e-4).sqrt()
                                - self.contrastive_pos_thres).pow(2)
                    pos_loss_mean = pos_loss.mean()
                    loss['contrastive_pos'] = self.contrastive_coeff_pos * pos_loss_mean
                    # Negative loss
                    # Margin grows logarithmically with geodesic
                    # distance, capped at 2.
                    neg_dist = (dist_vec[dist_vec > self.contrastive_threshold]
                                    / self.contrastive_threshold).log() + 1.
                    neg_dist = torch.clamp(neg_dist, max=2)
                    neg_loss = F.relu(neg_dist -
                                ((src_neg - tgt_neg).pow(2).sum(1) + 1e-4).sqrt()).pow(2)
                    if self.scale_with_geodesics:
                        neg_loss = neg_loss / neg_dist
                    neg_loss_mean = neg_loss.mean()
                    loss['contrastive_neg'] = self.contrastive_coeff_neg * neg_loss_mean
        return loss
| 15,474 | Python | 42.105849 | 109 | 0.511439 |
NVlabs/ACID/ACID/src/conv_onet/config.py | import os
from src.encoder import encoder_dict
from src.conv_onet import models, training
from src.conv_onet import generation
from src import data
def get_model(cfg, device=None, dataset=None, **kwargs):
    ''' Dispatches model construction on cfg['model']['type'].

    Args:
        cfg (dict): imported yaml config
        device (device): pytorch device
        dataset (dataset): dataset

    Raises:
        ValueError: if the configured model type is unknown (the
            original implementation silently returned None here).
    '''
    model_type = cfg['model']['type']
    if model_type == 'geom':
        return get_geom_model(cfg, device, dataset)
    elif model_type == 'combined':
        return get_combined_model(cfg, device, dataset)
    raise ValueError(f"unknown model type: {model_type!r}")
def get_combined_model(cfg, device=None, dataset=None, **kwargs):
    ''' Return the combined (geometry + action) Occupancy Network model.

    Args:
        cfg (dict): imported yaml config
        device (device): pytorch device
        dataset (dataset): dataset
    '''
    dim = cfg['data']['dim']
    act_dim = cfg['data']['act_dim']
    obj_c_dim = cfg['model']['obj_c_dim']
    decoder_kwargs = cfg['model']['decoder_kwargs']
    obj_encoder_kwargs = cfg['model']['obj_encoder_kwargs']
    padding = cfg['data']['padding']
    decoder = 'combined_decoder'
    encoder = 'geom_encoder'
    # BUG FIX: the original condition compared the *string literal*
    # 'env_c_dim' with 0 ("'env_c_dim' != 0"), which is always True, so
    # a configured value of 0 still built an environment encoder.  The
    # intent is to test the configured value itself.
    if 'env_c_dim' in cfg['model'] and cfg['model']['env_c_dim'] != 0:
        env_c_dim = cfg['model']['env_c_dim']
        env_encoder_kwargs = cfg['model']['env_encoder_kwargs']
        env_encoder = encoder_dict[encoder](
            dim=dim, c_dim=env_c_dim, padding=padding,
            **env_encoder_kwargs
        )
    else:
        env_c_dim = 0
        env_encoder = None
    # The decoder consumes concatenated object + environment features
    # for both the perception and the action branches.
    decoder = models.decoder_dict[decoder](
        dim=dim,
        c_per_dim=obj_c_dim+env_c_dim,
        c_act_dim=obj_c_dim+env_c_dim,
        padding=padding,
        **decoder_kwargs
    )
    # Separate encoders for perceived geometry and for the action input.
    obj_per_encoder = encoder_dict[encoder](
        dim=dim, c_dim=obj_c_dim, padding=padding,
        **obj_encoder_kwargs
    )
    obj_act_encoder = encoder_dict[encoder](
        dim=act_dim, c_dim=obj_c_dim, padding=padding,
        **obj_encoder_kwargs
    )
    model = models.ConvImpDyn(
        obj_per_encoder, obj_act_encoder, env_encoder, decoder, device=device
    )
    return model
def get_geom_model(cfg, device=None, dataset=None, **kwargs):
    ''' Return the geometry-only Occupancy Network model.

    Args:
        cfg (dict): imported yaml config
        device (device): pytorch device
        dataset (dataset): dataset
    '''
    dim = cfg['data']['dim']
    obj_c_dim = cfg['model']['obj_c_dim']
    decoder_kwargs = cfg['model']['decoder_kwargs']
    obj_encoder_kwargs = cfg['model']['obj_encoder_kwargs']
    padding = cfg['data']['padding']
    decoder = 'geom_decoder'
    encoder = 'geom_encoder'
    # BUG FIX: as in get_combined_model, the original tested the string
    # literal 'env_c_dim' against 0 (always True); test the configured
    # value instead.
    if 'env_c_dim' in cfg['model'] and cfg['model']['env_c_dim'] != 0:
        env_c_dim = cfg['model']['env_c_dim']
        env_encoder_kwargs = cfg['model']['env_encoder_kwargs']
        env_encoder = encoder_dict[encoder](
            dim=dim, c_dim=env_c_dim, padding=padding,
            **env_encoder_kwargs
        )
    else:
        env_c_dim = 0
        env_encoder = None
    # Decoder sees concatenated object + environment features.
    decoder = models.decoder_dict[decoder](
        dim=dim, c_dim=obj_c_dim+env_c_dim, padding=padding,
        **decoder_kwargs
    )
    obj_encoder = encoder_dict[encoder](
        dim=dim, c_dim=obj_c_dim, padding=padding,
        **obj_encoder_kwargs
    )
    model = models.ConvOccGeom(
        obj_encoder, env_encoder, decoder, device=device
    )
    return model
def get_trainer(model, optimizer, cfg, device, **kwargs):
    ''' Returns the trainer object.

    Args:
        model (nn.Module): the Occupancy Network model
        optimizer (optimizer): pytorch optimizer object
        cfg (dict): imported yaml config
        device (device): pytorch device
    '''
    # Visualizations go into <out_dir>/vis.
    vis_dir = os.path.join(cfg['training']['out_dir'], 'vis')
    return training.PlushTrainer(model, optimizer, cfg,
                                 device=device, vis_dir=vis_dir)
def get_generator(model, cfg, device, **kwargs):
    ''' Returns the generator object.

    Args:
        model (nn.Module): Occupancy Network model
        cfg (dict): imported yaml config
        device (device): pytorch device
    '''
    gen_cfg = cfg['generation']
    return generation.Generator3D(
        model,
        device=device,
        threshold=cfg['test']['threshold'],
        resolution0=gen_cfg['resolution_0'],
        upsampling_steps=gen_cfg['upsampling_steps'],
        sample=gen_cfg['use_sampling'],
        refinement_step=gen_cfg['refinement_step'],
        simplify_nfaces=gen_cfg['simplify_nfaces'],
        padding=cfg['data']['padding'],
        vol_info=None,
        vol_bound=None,
    )
| 4,514 | Python | 29.1 | 77 | 0.597475 |
NVlabs/ACID/ACID/src/conv_onet/__init__.py | from src.conv_onet import (
config, generation, training, models
)
# BUG FIX: __all__ must contain *strings*; listing the module objects
# themselves makes `from src.conv_onet import *` raise a TypeError.
__all__ = [
    'config', 'generation', 'training', 'models'
]
| 127 | Python | 14.999998 | 40 | 0.661417 |