diff --git a/.gitmodules b/.gitmodules
index 1d08b1ee..15da0b04 100755
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,6 @@
 [submodule "third_party/isaacgym"]
 	path = third_party/isaacgym
-	url = ../../kscalelabs/isaacgym.git
\ No newline at end of file
+	url = ../../kscalelabs/isaacgym.git
+[submodule "sim/genesis/rsl_rl"]
+	path = sim/genesis/rsl_rl
+	url = git@github.com:leggedrobotics/rsl_rl.git
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 00000000..77108c0e
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,4 @@
+{
+    "marscode.chatLanguage": "cn",
+    "CodeGPT.apiKey": "Ollama"
+}
\ No newline at end of file
diff --git a/analyze_onnx.py b/analyze_onnx.py
new file mode 100644
index 00000000..e88d4995
--- /dev/null
+++ b/analyze_onnx.py
@@ -0,0 +1,28 @@
+import onnxruntime as ort
+import numpy as np
+
+# Load the ONNX model
+model_path = "sim/genesis/logs/zeroth-walking/model_100.onnx"
+session = ort.InferenceSession(model_path)
+
+# Get the input metadata
+input_name = session.get_inputs()[0].name
+input_shape = session.get_inputs()[0].shape
+print(f"Input name: {input_name}, shape: {input_shape}")
+
+# Create random input data
+obs_dim = input_shape[1]  # observation dimension
+dummy_input = np.random.randn(1, obs_dim).astype(np.float32)
+
+print(f"Dummy input shape: {dummy_input.shape}")
+print(f"Dummy input: {dummy_input}")
+
+# Run the model
+outputs = session.run(None, {input_name: dummy_input})
+actions = outputs[0]
+
+# Analyze the output
+print(f"Output actions shape: {actions.shape}")
+print(f"First 5 actions: {actions[0][:5]}")
+print(f"Actions mean: {np.mean(actions)}")
+print(f"Actions std: {np.std(actions)}")
diff --git a/docker/10_nvidia.json b/docker/10_nvidia.json
new file mode 100755
index 00000000..bc724bc5
--- /dev/null
+++ b/docker/10_nvidia.json
@@ -0,0 +1,7 @@
+{
+    "file_format_version" : "1.0.0",
+    "ICD" : {
+        "library_path" : "libEGL_nvidia.so.0"
+    }
+}
+
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100755
index 00000000..c6f81e62
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,75 @@
+# First, build zeroth-bot-sim:v0
+# FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04
+
+# COPY sources.list /etc/apt/sources.list
+
+# RUN mkdir /root/.pip
+# COPY pip.conf /root/.pip/pip.conf
+
+# RUN apt install -y wget zlib1g-dev libssl-dev libncurses5-dev libsqlite3-dev libreadline-dev libtk8.6 libgdm-dev libdb4o-cil-dev libpcap-dev
+# RUN wget https://mirrors.huaweicloud.com/python/3.8.19/Python-3.8.19.tar.xz && tar -xvf Python-3.8.19.tar.xz
+
+# WORKDIR /root/Python-3.8.19
+# RUN ./configure --prefix=/usr/local && make && make install
+
+# WORKDIR /usr/local/bin/
+# RUN ln -s pip3 pip
+
+# WORKDIR /app/sim/
+# RUN make install-dev
+
+# RUN wget https://developer.nvidia.com/isaac-gym-preview-4
+# # RUN tar -xvf
+# RUN make install-third-party-external
+
+FROM zeroth-bot-sim:v0
+
+WORKDIR /root
+
+RUN apt update
+
+# on vnc client use :5901
+RUN apt update && apt install -y \
+    --no-install-recommends \
+    libxcursor-dev \
+    libxrandr-dev \
+    libxinerama-dev \
+    libxi-dev \
+    mesa-common-dev \
+    zip \
+    unzip \
+    make \
+    vulkan-utils \
+    mesa-vulkan-drivers \
+    pigz \
+    git \
+    libegl1
+
+# WAR for eglReleaseThread shutdown crash in libEGL_mesa.so.0 (ensure it's never detected/loaded)
+# Can't remove package libegl-mesa0 directly (because of libegl1 which we need)
+RUN rm /usr/lib/x86_64-linux-gnu/libEGL_mesa.so.0 /usr/lib/x86_64-linux-gnu/libEGL_mesa.so.0.0.0 /usr/share/glvnd/egl_vendor.d/50_mesa.json
+
+COPY ./nvidia_icd.json /usr/share/vulkan/icd.d/nvidia_icd.json
+COPY ./10_nvidia.json /usr/share/glvnd/egl_vendor.d/10_nvidia.json
+
+
+WORKDIR /app/sim
+
+RUN useradd --create-home gymuser
+USER gymuser
+
+# copy gym repo to docker
+COPY --chown=gymuser . .
+
+# install gym modules
+ENV PATH="/home/gymuser/.local/bin:$PATH"
+
+RUN export MODEL_DIR=sim/resources/stompymicro
+
+ENV NVIDIA_VISIBLE_DEVICES=all NVIDIA_DRIVER_CAPABILITIES=all
+
+CMD ["tail","-f", "/dev/null"]
+
+# CMD ["python3", "sim/train.py", "--task=stompymicro", "--num_envs=4"]
+# python3 sim/train.py --task=stompymicro --num_envs=4
+
diff --git a/docker/README.md b/docker/README.md
new file mode 100755
index 00000000..43c8969e
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,104 @@
+Preliminaries
+```
+Ensure nvidia-smi works on your host machine.
+```
+
+There is a prebuilt docker image, tested on an amd64 machine running Ubuntu 24.04 LTS.
+```
+docker pull ghcr.io/bigjohnn/zeroth-bot-sim:v1
+```
+
+If that does not work, you can build the image yourself.
+
+Adjust your Dockerfile and docker-compose.yml accordingly.
+```
+Prebuild a docker image that has the external dependencies
+
+# First, build zeroth-bot-sim:v0
+# FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04
+
+# COPY sources.list /etc/apt/sources.list
+
+# RUN mkdir /root/.pip
+# COPY pip.conf /root/.pip/pip.conf
+
+# RUN apt install -y wget zlib1g-dev libssl-dev libncurses5-dev libsqlite3-dev libreadline-dev libtk8.6 libgdm-dev libdb4o-cil-dev libpcap-dev
+# RUN wget https://mirrors.huaweicloud.com/python/3.8.19/Python-3.8.19.tar.xz && tar -xvf Python-3.8.19.tar.xz
+
+# WORKDIR /root/Python-3.8.19
+# RUN ./configure --prefix=/usr/local && make && make install
+
+# WORKDIR /usr/local/bin/
+# RUN ln -s pip3 pip
+
+# WORKDIR /app/sim/
+# RUN make install-dev
+
+# RUN wget https://developer.nvidia.com/isaac-gym-preview-4
+# # RUN tar -xvf
+# RUN make install-third-party-external
+
+```
+
+Then,
+
+Terminal1:
+```
+docker-compose up --build
+```
+
+```
+WARNING: Found orphan containers (a55a8ae7a762_docker_my_cuda_service_1) for this project. If you removed or renamed this service in your compose file, you can run this command with the --remove-orphans flag to clean it up.
+Building my-service
+[+] Building 1.6s (10/10) FINISHED docker:default
+ => [internal] load build definition from Dockerfile 0.2s
+ => => transferring dockerfile: 348B 0.0s
+ => [internal] load metadata for docker.io/nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04 0.0s
+ => [internal] load .dockerignore 0.2s
+ => => transferring context: 2B 0.0s
+ => [1/5] FROM docker.io/nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04 0.0s
+ => [internal] load build context 0.2s
+ => => transferring context: 34B 0.0s
+ => CACHED [2/5] WORKDIR /app 0.0s
+ => CACHED [3/5] COPY sources.list /etc/apt/sources.list 0.0s
+ => CACHED [4/5] RUN apt update 0.0s
+ => CACHED [5/5] WORKDIR /app/sim/ 0.0s
+ => exporting to image 0.2s
+ => => exporting layers 0.0s
+ => => writing image sha256:8e9c02e6c8b50dcbf7d6d1962d51de926126f132b65b65952ad8dfc74634f8c6 0.0s
+ => => naming to docker.io/library/docker_my-service 0.1s
+WARNING: Image for service my-service was built because it did not already exist. To rebuild this image you must use `docker-compose build` or `docker-compose up --build`.
+Creating docker_my-service_1 ... done
+Attaching to docker_my-service_1
+my-service_1 |
+my-service_1 | ==========
+my-service_1 | == CUDA ==
+my-service_1 | ==========
+my-service_1 |
+my-service_1 | CUDA Version 12.1.0
+my-service_1 |
+my-service_1 | Container image Copyright (c) 2016-2023, NVIDIA CORPORATION & AFFILIATES.
All rights reserved. +my-service_1 | +my-service_1 | This container image and its contents are governed by the NVIDIA Deep Learning Container License. +my-service_1 | By pulling and using the container, you accept the terms and conditions of this license: +my-service_1 | https://developer.nvidia.com/ngc/nvidia-deep-learning-container-license +my-service_1 | +my-service_1 | A copy of this license is made available in this container at /NGC-DL-CONTAINER-LICENSE for your convenience. +my-service_1 | +my-service_1 | ************************* +my-service_1 | ** DEPRECATION NOTICE! ** +my-service_1 | ************************* +my-service_1 | THIS IMAGE IS DEPRECATED and is scheduled for DELETION. +my-service_1 | https://gitlab.com/nvidia/container-images/cuda/blob/master/doc/support-policy.md +my-service_1 | +``` + +Terminal2: + +``` +docker exec -it docker_zeroth-sim_1 /bin/bash +``` + +``` +gymuser@06aac36e0751:/app/sim# python3 sim/train.py --task=stompymicro --num_envs=4 +``` \ No newline at end of file diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml new file mode 100755 index 00000000..b1c7a822 --- /dev/null +++ b/docker/docker-compose.yml @@ -0,0 +1,24 @@ +# version: '3.8' + +services: + zeroth-sim: + build: + context: . # Assuming the Dockerfile is in the current directory + dockerfile: Dockerfile + volumes: + - /home/hph/Codes/zeroth-bot:/app # Must have absolute path + - /tmp/.X11-unix:/tmp/.X11-unix + environment: + - DISPLAY=$DISPLAY + privileged: true + cap_add: + - SYS_PTRACE + shm_size: 6g + tty: true + stdin_open: true + deploy: + resources: + reservations: + devices: + - capabilities: [gpu] + count: all \ No newline at end of file diff --git a/docker/nvidia_icd.json b/docker/nvidia_icd.json new file mode 100755 index 00000000..cc8c5d6b --- /dev/null +++ b/docker/nvidia_icd.json @@ -0,0 +1,7 @@ +{ + "file_format_version" : "1.0.0", + "ICD": { + "library_path": "libGLX_nvidia.so.0", + "api_version" : "1.1.95" + } +} diff --git a/docker/pip.conf b/docker/pip.conf new file mode 100755 index 00000000..608903e2 --- /dev/null +++ b/docker/pip.conf @@ -0,0 +1,3 @@ +[global] +trusted-host=mirrors.aliyun.com +index-url=http://mirrors.aliyun.com/pypi/simple/ \ No newline at end of file diff --git a/docker/sources.list b/docker/sources.list new file mode 100755 index 00000000..2280baed --- /dev/null +++ b/docker/sources.list @@ -0,0 +1,4 @@ +deb http://mirrors.ustc.edu.cn/ubuntu/ focal main restricted universe multiverse +deb http://mirrors.ustc.edu.cn/ubuntu/ focal-updates main restricted universe multiverse +deb http://mirrors.ustc.edu.cn/ubuntu/ focal-backports main restricted universe multiverse +deb http://mirrors.ustc.edu.cn/ubuntu/ focal-security main restricted universe multiverse \ No newline at end of file diff --git a/examples/model_100.onnx b/examples/model_100.onnx new file mode 100644 index 00000000..45622d85 Binary files /dev/null and b/examples/model_100.onnx differ diff --git a/examples/model_100.pt b/examples/model_100.pt new file mode 100644 index 00000000..c30b8744 Binary files /dev/null and b/examples/model_100.pt differ diff --git a/sim/envs/humanoids/stompymicro_config.py b/sim/envs/humanoids/stompymicro_config.py old mode 100644 new mode 100755 index d0f3c2ae..446ff708 --- a/sim/envs/humanoids/stompymicro_config.py +++ b/sim/envs/humanoids/stompymicro_config.py @@ -1,7 +1,5 @@ """Defines the environment configuration for the Getting up task""" -import numpy as np - from sim.env import robot_urdf_path from sim.envs.base.legged_robot_config import ( # 
type: ignore
    LeggedRobotCfg,
@@ -40,7 +38,8 @@ class asset(LeggedRobotCfg.asset):
        file = str(robot_urdf_path(name))
        foot_name = ["foot_left", "foot_right"]
-       knee_name = ["left_knee_pitch_motor", "right_knee_pitch_motor"]
+       # knee_name = ["left_knee_pitch_motor", "right_knee_pitch_motor"]
+       knee_name = ["ankle_pitch_left", "ankle_pitch_right"]
        termination_height = 0.05
        default_feet_height = 0.02
@@ -150,7 +149,7 @@ class ranges:
            lin_vel_x = [-0.05, 0.23]  # min max [m/s]
            lin_vel_y = [-0.04, 0.04]  # min max [m/s]
            ang_vel_yaw = [-0.1, 0.1]  # min max [rad/s]
-           heading = [-np.pi, np.pi]  # min max [rad]
+           heading = [-3.14, 3.14]
    class rewards:
        base_height_target = Robot.height
diff --git a/sim/genesis/.gitignore b/sim/genesis/.gitignore
new file mode 100644
index 00000000..7ed4c135
--- /dev/null
+++ b/sim/genesis/.gitignore
@@ -0,0 +1,3 @@
+/logs/
+/rsl_rl/
+zeroth_train_crab.py
diff --git a/sim/genesis/README.md b/sim/genesis/README.md
new file mode 100644
index 00000000..babfd51e
--- /dev/null
+++ b/sim/genesis/README.md
@@ -0,0 +1,34 @@
+# Enter the genesis directory
+`cd sim/genesis`
+
+# Install rsl_rl
+```
+git clone https://github.com/leggedrobotics/rsl_rl
+cd rsl_rl && git checkout v2.2.0 && pip install -e .
+```
+
+# Install tensorboard
+`pip install tensorboard`
+
+After installation, start training by running:
+
+`python zeroth_train.py`
+
+To monitor the training process, start TensorBoard:
+
+`tensorboard --logdir logs`
+
+To view the training results:
+
+`python zeroth_eval.py`
+
+Model conversion:
+
+`cd genesis`
+
+`python3 utils/convert_to_onnx.py --cfg logs/zeroth-walking/cfgs.pkl --model ../../examples/model_100.pt --output ../../examples/model_100.onnx`
+
+Note ⚠️
+On Mac M-series chips, use micromamba in place of conda, for example:
+`micromamba activate genesis`
+and then run the pip/python commands as usual.
\ No newline at end of file
diff --git a/sim/genesis/README_EN.md b/sim/genesis/README_EN.md
new file mode 100644
index 00000000..7ea94fa2
--- /dev/null
+++ b/sim/genesis/README_EN.md
@@ -0,0 +1,75 @@
+# Genesis Simulation Environment - English Version
+
+## Getting Started
+
+### Navigate to genesis directory
+```bash
+cd sim/genesis
+```
+
+### Install rsl_rl
+```bash
+git clone https://github.com/leggedrobotics/rsl_rl
+cd rsl_rl && git checkout v2.2.0 && pip install -e .
+```
+
+### Install TensorBoard
+```bash
+pip install tensorboard
+```
+
+## Training
+
+After installation, start training by running:
+
+```bash
+python zeroth_train.py
+```
+
+To monitor the training process, start TensorBoard:
+
+```bash
+tensorboard --logdir logs
+```
+
+## Model Conversion to ONNX
+
+To convert trained models to ONNX format:
+
+```bash
+python utils/convert_to_onnx.py \
+    --cfg logs/zeroth-walking/cfgs.pkl \
+    --model ../../examples/model_100.pt \
+    --output ../../examples/model_100.onnx
+```
+
+Arguments:
+- `--cfg`: Path to config file (.pkl)
+- `--model`: Path to model file (.pt)
+- `--output`: Output ONNX file path
+
+## Evaluation
+
+To view training results:
+
+```bash
+python zeroth_eval.py
+```
+
+## Additional Notes
+
+1. Make sure all dependencies are installed properly
+2. Training logs will be saved in the `logs` directory
+3. For advanced configuration, refer to the training script parameters
+4. Ensure proper GPU setup if available for faster training
+5.
For Mac M-series chips, use micromamba instead of conda: + ```bash + micromamba activate genesis + ``` + Then proceed with pip/python commands + +## Troubleshooting + +- If encountering dependency issues, try creating a virtual environment +- Check Python version compatibility (requires Python 3.8+) +- Verify CUDA installation if using GPU acceleration diff --git a/sim/genesis/rsl_rl b/sim/genesis/rsl_rl new file mode 160000 index 00000000..1cba8fb4 --- /dev/null +++ b/sim/genesis/rsl_rl @@ -0,0 +1 @@ +Subproject commit 1cba8fb425c21e071fd024c33e25e7a5f5831163 diff --git a/sim/genesis/utils/convert_to_onnx.py b/sim/genesis/utils/convert_to_onnx.py new file mode 100644 index 00000000..71f74c70 --- /dev/null +++ b/sim/genesis/utils/convert_to_onnx.py @@ -0,0 +1,67 @@ +import torch +import torch.nn as nn +import pickle +import argparse + +def parse_args(): + parser = argparse.ArgumentParser(description='Convert model to ONNX format') + parser.add_argument('--cfg', type=str, required=True, + help='Path to config file (.pkl)') + parser.add_argument('--model', type=str, required=True, + help='Path to model file (.pt)') + parser.add_argument('--output', type=str, required=True, + help='Output ONNX file path') + return parser.parse_args() + +args = parse_args() + +# 加载配置 +with open(args.cfg, 'rb') as f: + cfgs = pickle.load(f) + print(f"Loaded config: {cfgs}") + +# 加载模型 +model_dict = torch.load(args.model, weights_only=True) + +# 创建继承自ActorCritic的模型类 +from rsl_rl.modules import ActorCritic + +class ExportModel(ActorCritic): + def forward(self, obs): + # 使用actor网络生成动作 + actions = self.actor(obs) + return actions + +# 根据配置创建模型实例 +model = ExportModel( + num_actor_obs=cfgs[1]['num_single_obs'], + num_critic_obs=cfgs[1]['num_single_obs'], + num_actions=cfgs[0]['num_actions'], + actor_hidden_dims=cfgs[4]['policy']['actor_hidden_dims'], + critic_hidden_dims=cfgs[4]['policy']['critic_hidden_dims'], + activation='elu', + init_noise_std=cfgs[4]['policy']['init_noise_std'] +) + +# 加载模型参数 +model.load_state_dict(model_dict['model_state_dict']) +model.eval() + +# 创建示例输入 +obs_dim = cfgs[1]['num_single_obs'] # 从obs_cfg获取观测维度 +dummy_input = torch.randn(1, obs_dim) + +# 转换为ONNX +torch.onnx.export( + model, + dummy_input, + args.output, + input_names=['obs'], + output_names=['actions'], + dynamic_axes={ + 'obs': {0: 'batch_size'}, + 'actions': {0: 'batch_size'} + } +) + +print("Model successfully converted to ONNX format") diff --git a/sim/genesis/utils/temp_read_pkl.py b/sim/genesis/utils/temp_read_pkl.py new file mode 100644 index 00000000..bd9f798c --- /dev/null +++ b/sim/genesis/utils/temp_read_pkl.py @@ -0,0 +1,6 @@ +import pickle +from pprint import pprint + +with open('./logs/zeroth-walking/cfgs.pkl', 'rb') as f: + data = pickle.load(f) + pprint(data, indent=2, width=80, depth=3) diff --git a/sim/genesis/zeroth_env.py b/sim/genesis/zeroth_env.py new file mode 100755 index 00000000..91d0a516 --- /dev/null +++ b/sim/genesis/zeroth_env.py @@ -0,0 +1,475 @@ +import torch +import math +import collections +import genesis as gs +from genesis.utils.geom import quat_to_xyz, transform_by_quat, inv_quat, transform_quat_by_quat +import numpy as np + +def gs_rand_float(lower, upper, shape, device): + return (upper - lower) * torch.rand(size=shape, device=device) + lower + +def linear_interpolation(start, end, t, max_t): + return start + (end - start) * (t / max_t) + +def get_from_curriculum(curriculum, t, max_t): + min_start = curriculum["start"][0] + min_end = curriculum["end"][0] + max_start = curriculum["start"][1] + 
max_end = curriculum["end"][1] + min_value = linear_interpolation(min_start, min_end, t, max_t) + max_value = linear_interpolation(max_start, max_end, t, max_t) + return np.random.uniform(min_value, max_value) + +class ZerothEnv: + def __init__(self, num_envs, env_cfg, obs_cfg, reward_cfg, command_cfg, show_viewer=False, device="mps"): + self.device = torch.device(device) + self.total_steps = 0 + self.max_steps = 40_000_000 + self.num_envs = num_envs + self.num_single_obs = obs_cfg["num_single_obs"] + self.num_actions = env_cfg["num_actions"] + self.num_commands = command_cfg["num_commands"] + + # observation history + self.frame_stack = obs_cfg.get("frame_stack", 1) + self.num_obs = self.num_single_obs * self.frame_stack + self.c_frame_stack = obs_cfg.get("c_frame_stack", 1) + self.obs_history = collections.deque(maxlen=self.frame_stack) + self.critic_history = collections.deque(maxlen=self.c_frame_stack) + + for _ in range(self.frame_stack): + self.obs_history.append( + torch.zeros(self.num_envs, self.num_single_obs, dtype=torch.float, device=self.device) + ) + + self.simulate_action_latency = True # there is a 1 step latency on real robot + self.dt = 0.02 # control frequence on real robot is 50Hz + self.max_episode_length = math.ceil(env_cfg["episode_length_s"] / self.dt) + + self.env_cfg = env_cfg + self.obs_cfg = obs_cfg + self.reward_cfg = reward_cfg + self.command_cfg = command_cfg + + self.obs_scales = obs_cfg["obs_scales"] + self.reward_scales = reward_cfg["reward_scales"] + + # create scene + self.scene = gs.Scene( + sim_options=gs.options.SimOptions(dt=self.dt, substeps=4), + viewer_options=gs.options.ViewerOptions( + max_FPS=int(0.5 / self.dt), + camera_pos=(2.0, 0.0, 2.5), + camera_lookat=(0.0, 0.0, 0.5), + camera_fov=40, + ), + vis_options=gs.options.VisOptions(n_rendered_envs=1), + rigid_options=gs.options.RigidOptions( + dt=self.dt, + constraint_solver=gs.constraint_solver.Newton, + enable_collision=True, + enable_joint_limit=True, + gravity=(0, 0, -9.81) + ), + show_viewer=show_viewer, + ) + + self.horizontal_scale = 0.5 + terrain_length = 12.0 + half_terrain_length = terrain_length * 0.5 + # add terrain + self.terrain = self.scene.add_entity(gs.morphs.Terrain( + n_subterrains=(1, 1), + subterrain_size=(terrain_length, terrain_length), + pos=(-half_terrain_length, -half_terrain_length, 0.0), # align with the world origin + randomize=True, + subterrain_types="wave_terrain", + horizontal_scale=self.horizontal_scale, + vertical_scale=0.002, + visualization=True, + collision=True + )) + + # add robot + self.base_init_pos = torch.tensor(self.env_cfg["base_init_pos"], device=self.device) + self.base_init_quat = torch.tensor(self.env_cfg["base_init_quat"], device=self.device) + self.inv_base_init_quat = inv_quat(self.base_init_quat) + self.robot = self.scene.add_entity( + gs.morphs.URDF( + file="sim/resources/stompymicro/robot_fixed.urdf", + pos=self.base_init_pos.cpu().numpy(), + quat=self.base_init_quat.cpu().numpy(), + ), + ) + + # build + self.scene.build(n_envs=num_envs, env_spacing=(1.0,1.0)) + + # names to indices + self.motor_dofs = [self.robot.get_joint(name).dof_idx_local for name in self.env_cfg["dof_names"]] + # Initialize legs_joints mapping with bounds checking + self.legs_joints = {} + joint_names = ["left_hip_pitch", "left_knee_pitch", "left_ankle_pitch", + "right_hip_pitch", "right_knee_pitch", "right_ankle_pitch"] + + for i, name in enumerate(joint_names): + if i < len(self.motor_dofs): + self.legs_joints[name] = self.motor_dofs[i] - 6 # 0,1,2,3,4,5, 6,7,8,9 + 
else: + print(f"Warning: Joint {name} not found in motor_dofs") + + # Get number of bodies in the robot + self.num_bodies = self.robot.n_links + + self.rand_push_force = torch.zeros((self.num_envs, 3), device=self.device) + self.rand_push_torque = torch.zeros((self.num_envs, 3), device=self.device) + self.env_frictions = torch.ones((self.num_envs,), device=self.device) + + # PD control parameters + self.robot.set_dofs_kp([self.env_cfg["kp"]] * self.num_actions, self.motor_dofs) + self.robot.set_dofs_kv([self.env_cfg["kd"]] * self.num_actions, self.motor_dofs) + + # prepare reward functions and multiply reward scales by dt + self.reward_functions, self.episode_sums = dict(), dict() + for name in self.reward_scales.keys(): + self.reward_scales[name] *= self.dt + self.reward_functions[name] = getattr(self, "_reward_" + name) + self.episode_sums[name] = torch.zeros((self.num_envs,), device=self.device, dtype=gs.tc_float) + + # initialize buffers + self.base_lin_vel = torch.zeros((self.num_envs, 3), device=self.device, dtype=gs.tc_float) + self.base_ang_vel = torch.zeros((self.num_envs, 3), device=self.device, dtype=gs.tc_float) + self.projected_gravity = torch.zeros((self.num_envs, 3), device=self.device, dtype=gs.tc_float) + self.global_gravity = torch.tensor([0.0, 0.0, -9.81], device=self.device, dtype=gs.tc_float).repeat( + self.num_envs, 1 + ) + # observation buffers + self.obs_buf = torch.zeros((self.num_envs, self.num_obs), device=self.device, dtype=gs.tc_float) + + self.rew_buf = torch.zeros((self.num_envs,), device=self.device, dtype=gs.tc_float) + self.reset_buf = torch.ones((self.num_envs,), device=self.device, dtype=gs.tc_int) + self.episode_length_buf = torch.zeros((self.num_envs,), device=self.device, dtype=gs.tc_int) + self.commands = torch.zeros((self.num_envs, self.num_commands), device=self.device, dtype=gs.tc_float) + self.commands_scale = torch.tensor( + [self.obs_scales["lin_vel"], self.obs_scales["lin_vel"], self.obs_scales["ang_vel"]], + device=self.device, + dtype=gs.tc_float, + ) + self.actions = torch.zeros((self.num_envs, self.num_actions), device=self.device, dtype=gs.tc_float) + self.last_actions = torch.zeros_like(self.actions) + self.dof_pos = torch.zeros_like(self.actions) + self.dof_vel = torch.zeros_like(self.actions) + self.last_dof_vel = torch.zeros_like(self.actions) + self.base_pos = torch.zeros((self.num_envs, 3), device=self.device, dtype=gs.tc_float) + self.base_quat = torch.zeros((self.num_envs, 4), device=self.device, dtype=gs.tc_float) + self.default_dof_pos = torch.tensor( + [self.env_cfg["default_joint_angles"][name] for name in self.env_cfg["dof_names"]], + device=self.device, + dtype=gs.tc_float, + ) + # Initialize ref_dof_pos + self.ref_dof_pos = self.default_dof_pos.repeat(self.num_envs, 1).to(self.device) + self.extras = { + "observations": {}, + } # extra information for logging + + # Initialize missing variables + self.default_joint_pd_target = self.default_dof_pos.clone() + self.base_euler = torch.zeros((self.num_envs, 3), device=self.device) + self.filtered_base_height = torch.zeros(self.num_envs, device=self.device) + # Initialize terrain difficulty + self.terrain_difficulty = torch.zeros(self.num_envs, device=self.device) + self.difficulty_factors = { + "random_uniform_terrain": 0.3, + } + + def _resample_commands(self, envs_idx): + self.commands[envs_idx, 0] = gs_rand_float(*self.command_cfg["lin_vel_x_range"], (len(envs_idx),), self.device) + self.commands[envs_idx, 1] = gs_rand_float(*self.command_cfg["lin_vel_y_range"], 
(len(envs_idx),), self.device) + self.commands[envs_idx, 2] = gs_rand_float(*self.command_cfg["ang_vel_range"], (len(envs_idx),), self.device) + + def step(self, actions): + self.actions = torch.clip(actions, -self.env_cfg["clip_actions"], self.env_cfg["clip_actions"]) + # Update last actions before execution + self.last_actions[:] = self.actions[:] + exec_actions = self.last_actions if self.simulate_action_latency else self.actions + target_dof_pos = exec_actions * self.env_cfg["action_scale"] + self.default_dof_pos + self.robot.control_dofs_position(target_dof_pos, self.motor_dofs) + self.scene.step() + + # update buffers + self.episode_length_buf += 1 + self.base_pos[:] = self.robot.get_pos() + self.base_quat[:] = self.robot.get_quat() + self.base_euler = quat_to_xyz( + transform_quat_by_quat(torch.ones_like(self.base_quat) * self.inv_base_init_quat, self.base_quat) + ) + inv_base_quat = inv_quat(self.base_quat) + self.base_lin_vel[:] = transform_by_quat(self.robot.get_vel(), inv_base_quat) + self.base_ang_vel[:] = transform_by_quat(self.robot.get_ang(), inv_base_quat) + self.projected_gravity = transform_by_quat(self.global_gravity, inv_base_quat) + self.dof_pos[:] = self.robot.get_dofs_position(self.motor_dofs) + self.dof_vel[:] = self.robot.get_dofs_velocity(self.motor_dofs) + + # resample commands + envs_idx = ( + (self.episode_length_buf % int(self.env_cfg["resampling_time_s"] / self.dt) == 0) + .nonzero(as_tuple=False) + .flatten() + ) + self._resample_commands(envs_idx) + + # check termination and reset + self.reset_buf = self.episode_length_buf > self.max_episode_length + self.reset_buf |= torch.abs(self.base_euler[:, 1]) > self.env_cfg["termination_if_pitch_greater_than"] + self.reset_buf |= torch.abs(self.base_euler[:, 0]) > self.env_cfg["termination_if_roll_greater_than"] + + time_out_idx = (self.episode_length_buf > self.max_episode_length).nonzero(as_tuple=False).flatten() + self.extras["time_outs"] = torch.zeros_like(self.reset_buf, device=self.device, dtype=gs.tc_float) + self.extras["time_outs"][time_out_idx] = 1.0 + + self.reset_idx(self.reset_buf.nonzero(as_tuple=False).flatten()) + + # Update reference state before computing rewards + self.compute_ref_state() + + # compute reward + self.rew_buf[:] = 0.0 + for name, reward_func in self.reward_functions.items(): + rew = reward_func() * self.reward_scales[name] + self.rew_buf += rew + self.episode_sums[name] += rew + + # Compute observations + self.compute_observations() + + self.last_actions[:] = self.actions[:] + self.last_dof_vel[:] = self.dof_vel[:] + + # obs, rewards, dones, infos + return self.obs_buf, self.rew_buf, self.reset_buf, self.extras + + def get_observations(self): + return self.obs_buf, self.extras + + def _get_phase(self): + cycle_time = self.env_cfg.get("cycle_time", 1.0) + phase = self.episode_length_buf * self.dt / cycle_time + return phase + + def compute_ref_state(self): + phase = self._get_phase() + sin_pos = torch.sin(2 * torch.pi * phase) + sin_pos_l = sin_pos.clone() + sin_pos_r = sin_pos.clone() + self.ref_dof_pos = self.default_dof_pos.repeat(self.num_envs, 1).to(self.device) + + scale_1 = self.env_cfg.get("target_joint_pos_scale", 0.1) + scale_2 = 2 * scale_1 + + # Validate joint indices before accessing ref_dof_pos + def safe_update(joint_name, scale): + if joint_name in self.legs_joints: + idx = self.legs_joints[joint_name] + if idx < self.ref_dof_pos.shape[1]: + self.ref_dof_pos[:, idx] += scale + else: + print(f"Warning: Joint index {idx} for {joint_name} is out of bounds") + else: + 
print(f"Warning: Joint {joint_name} not found in legs_joints") + + # left foot stance phase set to default joint pos + sin_pos_l[sin_pos_l > 0] = 0 + safe_update("left_hip_pitch", sin_pos_l * scale_1) + safe_update("left_knee_pitch", sin_pos_l * scale_2) + safe_update("left_ankle_pitch", sin_pos_l * scale_1) + + # right foot stance phase set to default joint pos + sin_pos_r[sin_pos_r < 0] = 0 + safe_update("right_hip_pitch", sin_pos_r * scale_1) + safe_update("right_knee_pitch", sin_pos_r * scale_2) + safe_update("right_ankle_pitch", sin_pos_r * scale_1) + + # Double support phase + self.ref_dof_pos[torch.abs(sin_pos) < 0.1] = 0 + + def compute_observations(self): + phase = self._get_phase() + self.compute_ref_state() + + sin_pos = torch.sin(2 * torch.pi * phase).unsqueeze(1) + cos_pos = torch.cos(2 * torch.pi * phase).unsqueeze(1) + + self.command_input = torch.cat((sin_pos, cos_pos, self.commands[:, :3] * self.commands_scale), dim=1) + q = (self.dof_pos - self.default_dof_pos) * self.obs_scales["dof_pos"] + dq = self.dof_vel * self.obs_scales["dof_vel"] + + obs_buf = torch.cat( # total 41 dim + ( + self.command_input, # 5 = 2D(sin cos) + 3D(vel_x, vel_y, aug_vel_yaw) + q, # 10D + dq, # 10D + self.actions, # 10D + self.base_ang_vel * self.obs_scales["ang_vel"], # 3 + self.base_euler * self.obs_scales["quat"], # 3 + ), + dim=-1, + ) + + self.obs_buf = obs_buf + + def reset_idx(self, envs_idx): + if len(envs_idx) == 0: + return + # Sample uniform multipliers between min and max + kp_mult = gs_rand_float( + self.env_cfg["kp_multipliers"][0], + self.env_cfg["kp_multipliers"][1], + (1,), + self.device + ) + kd_mult = gs_rand_float( + self.env_cfg["kd_multipliers"][0], + self.env_cfg["kd_multipliers"][1], + (1,), + self.device + ) + + # Apply multipliers to default values + kp_values = torch.full( + (self.num_actions,), + self.env_cfg["kp"] * kp_mult.item(), + device=self.device + ) + kd_values = torch.full( + (self.num_actions,), + self.env_cfg["kd"] * kd_mult.item(), + device=self.device + ) + + # Set the PD gains + self.robot.set_dofs_kp(kp_values, self.motor_dofs) + self.robot.set_dofs_kv(kd_values, self.motor_dofs) + + # friction + friction = get_from_curriculum(self.env_cfg["env_friction_range"], self.total_steps, self.max_steps) + self.robot.set_friction(friction) + + # link mass + link_mass_mult = get_from_curriculum(self.env_cfg["link_mass_multipliers"], self.total_steps, self.max_steps) + for link in self.robot.links: + link.set_mass(link.get_mass() * link_mass_mult) + + # reset dofs + self.dof_pos[envs_idx] = self.default_dof_pos + self.dof_vel[envs_idx] = 0.0 + self.robot.set_dofs_position( + position=self.dof_pos[envs_idx], + dofs_idx_local=self.motor_dofs, + zero_velocity=True, + envs_idx=envs_idx, + ) + + # reset base + self.base_pos[envs_idx] = self.base_init_pos + self.base_quat[envs_idx] = self.base_init_quat.reshape(1, -1) + self.robot.set_pos(self.base_pos[envs_idx], zero_velocity=False, envs_idx=envs_idx) + self.robot.set_quat(self.base_quat[envs_idx], zero_velocity=False, envs_idx=envs_idx) + self.base_lin_vel[envs_idx] = 0 + self.base_ang_vel[envs_idx] = 0 + self.robot.zero_all_dofs_velocity(envs_idx) + + # reset buffers + self.last_actions[envs_idx] = 0.0 + self.last_dof_vel[envs_idx] = 0.0 + self.episode_length_buf[envs_idx] = 0 + self.reset_buf[envs_idx] = True + + # fill extras + self.extras["episode"] = {} + for key in self.episode_sums.keys(): + self.extras["episode"]["rew_" + key] = ( + torch.mean(self.episode_sums[key][envs_idx]).item() / 
self.env_cfg["episode_length_s"] + ) + self.episode_sums[key][envs_idx] = 0.0 + + self._resample_commands(envs_idx) + + def reset(self): + self.reset_buf[:] = True + self.reset_idx(torch.arange(self.num_envs, device=self.device)) + return self.obs_buf, None + + # ------------ reward functions---------------- + def _reward_tracking_lin_vel(self): + # Tracking of linear velocity commands (xy axes) + lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - self.base_lin_vel[:, :2]), dim=1) + return torch.exp(-lin_vel_error / self.reward_cfg["tracking_sigma"]) + + def _reward_tracking_ang_vel(self): + # Tracking of angular velocity commands (yaw) + ang_vel_error = torch.square(self.commands[:, 2] - self.base_ang_vel[:, 2]) + return torch.exp(-ang_vel_error / self.reward_cfg["tracking_sigma"]) + + def _reward_lin_vel_z(self): + # Penalize z axis base linear velocity + return torch.square(self.base_lin_vel[:, 2]) + + def _reward_action_rate(self): + # Penalize changes in actions + return torch.sum(torch.square(self.last_actions - self.actions), dim=1) + + def _reward_similar_to_default(self): + # Penalize joint poses far away from default pose + return torch.sum(torch.abs(self.dof_pos - self.default_dof_pos), dim=1) + + def _reward_base_height(self): + # 获取全局地形高度场 + terrain_hf = self.terrain.terrain_hf # 地形高度场矩阵 + + # 将基座坐标转换到地形局部坐标系(地形原点在(0,0)) + terrain_local_pos = self.base_pos[:, :2] # 直接使用全局坐标 + + # 转换为高度场索引 + x_idx = (terrain_local_pos[:, 0] / self.horizontal_scale).long() + y_idx = (terrain_local_pos[:, 1] / self.horizontal_scale).long() + + # 添加边界保护 + x_idx = torch.clamp(x_idx, 0, terrain_hf.shape[0]-1) + y_idx = torch.clamp(y_idx, 0, terrain_hf.shape[1]-1) + + # 确保地形高度场是二维数组 + if not isinstance(terrain_hf, np.ndarray): + terrain_hf = np.array(terrain_hf, dtype=np.float32) + + # 将地形高度场转换为PyTorch张量并移到GPU + terrain_hf_tensor = torch.from_numpy(terrain_hf).float().to(self.device) + + # 确保索引在有效范围内 + x_indices = torch.clamp(x_idx, 0, terrain_hf_tensor.shape[0]-1).long() + y_indices = torch.clamp(y_idx, 0, terrain_hf_tensor.shape[1]-1).long() + + # 使用张量索引保持设备一致性 + terrain_height = terrain_hf_tensor[x_indices, y_indices] + + # 确保结果维度正确 + if terrain_height.dim() == 0: + terrain_height = terrain_height.unsqueeze(0) + + # 计算基座相对高度 + base_height_above_terrain = self.base_pos[:, 2] - terrain_height + height_error = torch.abs(base_height_above_terrain - self.reward_cfg["base_height_target"]) + + return torch.exp(-height_error * self.reward_scales["base_height"]) + + def _reward_gait_symmetry(self): + # Reward symmetric gait patterns + left_hip = self.dof_pos[:, self.env_cfg["dof_names"].index("left_hip_pitch")] + right_hip = self.dof_pos[:, self.env_cfg["dof_names"].index("right_hip_pitch")] + left_knee = self.dof_pos[:, self.env_cfg["dof_names"].index("left_knee_pitch")] + right_knee = self.dof_pos[:, self.env_cfg["dof_names"].index("right_knee_pitch")] + + hip_symmetry = torch.abs(left_hip - right_hip) + knee_symmetry = torch.abs(left_knee - right_knee) + + return torch.exp(-(hip_symmetry + knee_symmetry)) diff --git a/sim/genesis/zeroth_eval.py b/sim/genesis/zeroth_eval.py new file mode 100644 index 00000000..f8fc08fa --- /dev/null +++ b/sim/genesis/zeroth_eval.py @@ -0,0 +1,60 @@ +import argparse +import os +import pickle + +import torch +from zeroth_env import ZerothEnv +from rsl_rl.runners import OnPolicyRunner + +import genesis as gs + +def run_sim(env, policy, obs): + while True: + actions = policy(obs) + obs, _, _, _ = env.step(actions) + +def main(): + parser = 
argparse.ArgumentParser() + parser.add_argument("-e", "--exp_name", type=str, default="zeroth-walking") + parser.add_argument("--ckpt", type=int, default=100) + args = parser.parse_args() + + gs.init() + + log_dir = f"logs/{args.exp_name}" + env_cfg, obs_cfg, reward_cfg, command_cfg, train_cfg = pickle.load(open(f"logs/{args.exp_name}/cfgs.pkl", "rb")) + # env_cfg, obs_cfg, reward_cfg, command_cfg, train_cfg = pickle.load(open(f"sim/genesis/logs/{args.exp_name}/cfgs.pkl", "rb")) + # Add missing class_name fields + train_cfg["algorithm"]["class_name"] = "PPO" + train_cfg["policy"]["class_name"] = "ActorCritic" + print("train_cfg:", train_cfg) # Add debug print + reward_cfg["reward_scales"] = {} + + env = ZerothEnv( + num_envs=1, + env_cfg=env_cfg, + obs_cfg=obs_cfg, + reward_cfg=reward_cfg, + command_cfg=command_cfg, + show_viewer=True, + ) + + # runner = OnPolicyRunner(env, train_cfg, log_dir, device="cuda:0") + runner = OnPolicyRunner(env, train_cfg, log_dir, device="mps") + resume_path = os.path.join(log_dir, f"model_{args.ckpt}.pt") + runner.load(resume_path) + # policy = runner.get_inference_policy(device="cuda:0") + policy = runner.get_inference_policy(device="mps") + + obs, _ = env.reset() + with torch.no_grad(): + gs.tools.run_in_another_thread(fn=run_sim, args=(env, policy, obs)) # start the simulation in another thread + env.scene.viewer.start() # start the viewer in the main thread (the render thread) + +if __name__ == "__main__": + main() + +""" +# evaluation +python examples/locomotion/zeroth_eval.py -e zeroth-walking -v --ckpt 100 +""" diff --git a/sim/genesis/zeroth_train.py b/sim/genesis/zeroth_train.py new file mode 100755 index 00000000..8542b66d --- /dev/null +++ b/sim/genesis/zeroth_train.py @@ -0,0 +1,183 @@ +import argparse +import os +import pickle +import shutil + +from zeroth_env import ZerothEnv +from rsl_rl.runners import OnPolicyRunner + +import genesis as gs + +def get_train_cfg(exp_name, max_iterations): + + train_cfg_dict = { + "num_steps_per_env": 64, # 增加步数以收集更多样本 + "save_interval": 10, + "empirical_normalization": True, + "algorithm": { + "rnd_cfg": None, + "symmetry_cfg": None, + "clip_param": 0.2, + "desired_kl": 0.01, + "entropy_coef": 0.02, # 增加熵系数以鼓励探索 + "gamma": 0.99, + "lam": 0.95, + "learning_rate": 0.0005, # 降低学习率以提高稳定性 + "max_grad_norm": 1.0, + "num_learning_epochs": 5, + "num_mini_batches": 8, # 增加mini-batch数量 + "schedule": "adaptive", + "use_clipped_value_loss": True, + "value_loss_coef": 1.0, + "class_name": "PPO", + }, + "policy": { + "activation": "elu", + "actor_hidden_dims": [512, 256, 128], + "critic_hidden_dims": [512, 256, 128], + "init_noise_std": 1.0, + "class_name": "ActorCritic", + }, + "runner": { + "algorithm_class_name": "PPO", + "experiment_name": exp_name, + "run_name": "zeroth-walking", + "device": "mps", + "checkpoint_interval": 5, # 每5次迭代保存checkpoint + "max_checkpoints": 10 # 最多保留3个最新checkpoint + } + } + + return train_cfg_dict + + +def get_cfgs(): + default_joint_angles={ # [rad] + "right_hip_pitch": 0.0, + "left_hip_pitch": 0.0, + "right_hip_yaw": 0.0, + "left_hip_yaw": 0.0, + "right_hip_roll": 0.0, + "left_hip_roll": 0.0, + "right_knee_pitch": 0.0, + "left_knee_pitch": 0.0, + "right_ankle_pitch": 0.0, + "left_ankle_pitch": 0.0, + } + env_cfg = { + "num_actions": 10, # 动作的数量 + # joint/link names + "default_joint_angles": default_joint_angles, # 默认关节角度 + "dof_names": list(default_joint_angles.keys()), # 关节名称列表 + # friction + "env_friction_range": { + "start": [0.9, 1.1], + "end": [0.9, 1.1], + }, + # link mass + # varying 
this too much collapses the training + "link_mass_multipliers": { + "start": [1.0, 1.0], + "end": [1.0, 1.0], + }, + # RFI + "rfi_scale": 0.1, + # PD + "kp": 20.0, # 比例增益(Proportional gain) + "kd": 0.5, # 微分增益(Derivative gain) + "kp_multipliers": [0.75, 1.25], + "kd_multipliers": [0.75, 1.25], + # termination + "termination_if_roll_greater_than": 10, # 如果滚转角度大于10度则终止 + "termination_if_pitch_greater_than": 10, # 如果俯仰角度大于10度则终止 + # base pose + "base_init_pos": [0.0, 0.0, 0.42], # 基础初始位置 + "base_init_quat": [1.0, 0.0, 0.0, 0.0], # 基础初始四元数(表示旋转) + "episode_length_s": 20.0, # 每个训练回合的时长(秒) + "resampling_time_s": 4.0, # 重新采样时间间隔(秒) + "action_scale": 0.25, # 动作缩放比例 + "simulate_action_latency": True, # 是否模拟动作延迟 + "clip_actions": 18.0, # 动作裁剪阈值 + } + obs_cfg = { + "num_single_obs": 41, + "obs_scales": { + "dof_pos": 1.0, + "dof_vel": 0.05, + "ang_vel": 1.0, + "lin_vel": 2.0, + "quat": 1.0, + }, + } + reward_cfg = { + "base_height_target": 0.32, + "min_dist": 0.03, + "max_dist": 0.14, + "target_joint_pos_scale": 0.17, + "target_feet_height": 0.02, + "cycle_time": 0.4, + "only_positive_rewards": True, + "tracking_sigma": 0.25, + "max_contact_force": 100, + "reward_scales": { + "tracking_lin_vel": 1.2, + "tracking_ang_vel": 0.3, + "lin_vel_z": -1.5, + "action_rate": -0.01, + "similar_to_default": -0.2, + "base_height": 0.3, + }, + } + command_cfg = { + "num_commands": 3, + "lin_vel_y_range": [-0.6, -0.6], # move faster than above! + "lin_vel_x_range": [-0.01, 0.01], + "ang_vel_range": [-0.01, 0.01], + } + + return env_cfg, obs_cfg, reward_cfg, command_cfg + +def run_learner(runner, max_iterations): + runner.learn(num_learning_iterations=max_iterations, init_at_random_ep_len=True) + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("-e", "--exp_name", type=str, default="zeroth-walking") + parser.add_argument("-B", "--num_envs", type=int, default=4096) + parser.add_argument("-V", "--vis", type=bool, default=False) + parser.add_argument("--max_iterations", type=int, default=101) + args = parser.parse_args() + + gs.init(logging_level="warning") + + log_dir = f"logs/{args.exp_name}" + env_cfg, obs_cfg, reward_cfg, command_cfg = get_cfgs() + train_cfg = get_train_cfg(args.exp_name, args.max_iterations) + + if os.path.exists(log_dir): + shutil.rmtree(log_dir) + os.makedirs(log_dir, exist_ok=True) + + env = ZerothEnv( + num_envs=args.num_envs, env_cfg=env_cfg, obs_cfg=obs_cfg, reward_cfg=reward_cfg, command_cfg=command_cfg,show_viewer=args.vis, + ) + + # runner = OnPolicyRunner(env, train_cfg, log_dir, device="cuda:0") + runner = OnPolicyRunner(env, train_cfg, log_dir, device="mps") + + pickle.dump( + [env_cfg, obs_cfg, reward_cfg, command_cfg, train_cfg], + open(f"{log_dir}/cfgs.pkl", "wb"), + ) + + gs.tools.run_in_another_thread(fn=run_learner, args=(runner, args.max_iterations)) # start the simulation in another thread + if args.vis: + env.scene.viewer.start() # start the viewer in the main thread (the render thread) + +if __name__ == "__main__": + main() + +""" +# training +python examples/locomotion/zeroth_train.py +""" diff --git a/sim/resources/stompymicro/joints.py b/sim/resources/stompymicro/joints.py index 1f59ff1e..2c83543c 100644 --- a/sim/resources/stompymicro/joints.py +++ b/sim/resources/stompymicro/joints.py @@ -97,14 +97,14 @@ class Robot(Node): def default_standing(cls) -> Dict[str, float]: return { # Legs - cls.legs.left.hip_pitch: 0.23, - cls.legs.left.knee_pitch: -0.741, + cls.legs.left.hip_pitch: 0.13, + cls.legs.left.knee_pitch: 0,#-0.741, cls.legs.left.hip_yaw: 0, 
cls.legs.left.hip_roll: 0, - cls.legs.left.ankle_pitch: -0.5, - cls.legs.right.hip_pitch: -0.23, - cls.legs.right.knee_pitch: 0.741, - cls.legs.right.ankle_pitch: 0.5, + cls.legs.left.ankle_pitch: 0,#-0.5, + cls.legs.right.hip_pitch: 0.13, + cls.legs.right.knee_pitch: 0,#0.741, + cls.legs.right.ankle_pitch: 0,#0.5, cls.legs.right.hip_yaw: 0, cls.legs.right.hip_roll: 0, } @@ -121,10 +121,10 @@ def default_limits(cls) -> Dict[str, Dict[str, float]]: # "lower": -0.43633231, # "upper": 1.5707963, # }, - # Robot.left_arm.elbow_pitch: { - # "lower": -1.5707963, - # "upper": 1.5707963, - # }, + Robot.left_arm.elbow_pitch: { + "lower": 1.5707963, + "upper": 4.71, + }, # # Right Arm # Robot.right_arm.shoulder_pitch: { # "lower": -1.7453293, @@ -134,10 +134,10 @@ def default_limits(cls) -> Dict[str, Dict[str, float]]: # "lower": -1.134464, # "upper": 0.87266463, # }, - # Robot.right_arm.elbow_pitch: { - # "lower": -1.5707963, - # "upper": 1.5707963, - # }, + Robot.right_arm.elbow_pitch: { + "lower": 1.5707963, + "upper": 4.71, + }, # Left Leg Robot.legs.left.hip_pitch: { "lower": -1.5707963, diff --git a/sim/resources/stompymicro/meshes/Torso.stl b/sim/resources/stompymicro/meshes/Torso.stl new file mode 100644 index 00000000..d2561551 Binary files /dev/null and b/sim/resources/stompymicro/meshes/Torso.stl differ diff --git a/sim/resources/stompymicro/meshes/ankle_pitch_left.stl b/sim/resources/stompymicro/meshes/ankle_pitch_left.stl new file mode 100644 index 00000000..28fcc816 Binary files /dev/null and b/sim/resources/stompymicro/meshes/ankle_pitch_left.stl differ diff --git a/sim/resources/stompymicro/meshes/ankle_pitch_right.stl b/sim/resources/stompymicro/meshes/ankle_pitch_right.stl new file mode 100644 index 00000000..c941c5a4 Binary files /dev/null and b/sim/resources/stompymicro/meshes/ankle_pitch_right.stl differ diff --git a/sim/resources/stompymicro/meshes/foot_left.stl b/sim/resources/stompymicro/meshes/foot_left.stl new file mode 100644 index 00000000..51ea814e Binary files /dev/null and b/sim/resources/stompymicro/meshes/foot_left.stl differ diff --git a/sim/resources/stompymicro/meshes/foot_right.stl b/sim/resources/stompymicro/meshes/foot_right.stl new file mode 100644 index 00000000..b9a47a14 Binary files /dev/null and b/sim/resources/stompymicro/meshes/foot_right.stl differ diff --git a/sim/resources/stompymicro/meshes/hip_roll_left.stl b/sim/resources/stompymicro/meshes/hip_roll_left.stl new file mode 100644 index 00000000..29ba4696 Binary files /dev/null and b/sim/resources/stompymicro/meshes/hip_roll_left.stl differ diff --git a/sim/resources/stompymicro/meshes/hip_roll_right.stl b/sim/resources/stompymicro/meshes/hip_roll_right.stl new file mode 100644 index 00000000..7553906c Binary files /dev/null and b/sim/resources/stompymicro/meshes/hip_roll_right.stl differ diff --git a/sim/resources/stompymicro/meshes/hip_yaw_left.stl b/sim/resources/stompymicro/meshes/hip_yaw_left.stl new file mode 100644 index 00000000..23518808 Binary files /dev/null and b/sim/resources/stompymicro/meshes/hip_yaw_left.stl differ diff --git a/sim/resources/stompymicro/meshes/hip_yaw_right.stl b/sim/resources/stompymicro/meshes/hip_yaw_right.stl new file mode 100644 index 00000000..983bb89a Binary files /dev/null and b/sim/resources/stompymicro/meshes/hip_yaw_right.stl differ diff --git a/sim/resources/stompymicro/meshes/knee_pitch_left.stl b/sim/resources/stompymicro/meshes/knee_pitch_left.stl new file mode 100644 index 00000000..e36d7813 Binary files /dev/null and 
b/sim/resources/stompymicro/meshes/knee_pitch_left.stl differ
diff --git a/sim/resources/stompymicro/meshes/knee_pitch_right.stl b/sim/resources/stompymicro/meshes/knee_pitch_right.stl
new file mode 100644
index 00000000..3af8e450
Binary files /dev/null and b/sim/resources/stompymicro/meshes/knee_pitch_right.stl differ
diff --git a/sim/resources/stompymicro/meshes/left_hand.stl b/sim/resources/stompymicro/meshes/left_hand.stl
new file mode 100644
index 00000000..85b70197
Binary files /dev/null and b/sim/resources/stompymicro/meshes/left_hand.stl differ
diff --git a/sim/resources/stompymicro/meshes/left_shoulder_yaw.stl b/sim/resources/stompymicro/meshes/left_shoulder_yaw.stl
new file mode 100644
index 00000000..09a86ba1
Binary files /dev/null and b/sim/resources/stompymicro/meshes/left_shoulder_yaw.stl differ
diff --git a/sim/resources/stompymicro/meshes/left_shoulder_yaw_motor.stl b/sim/resources/stompymicro/meshes/left_shoulder_yaw_motor.stl
new file mode 100644
index 00000000..61c85e92
Binary files /dev/null and b/sim/resources/stompymicro/meshes/left_shoulder_yaw_motor.stl differ
diff --git a/sim/resources/stompymicro/meshes/right_hand.stl b/sim/resources/stompymicro/meshes/right_hand.stl
new file mode 100644
index 00000000..5d665fc1
Binary files /dev/null and b/sim/resources/stompymicro/meshes/right_hand.stl differ
diff --git a/sim/resources/stompymicro/meshes/right_shoulder_yaw.stl b/sim/resources/stompymicro/meshes/right_shoulder_yaw.stl
new file mode 100644
index 00000000..1c927383
Binary files /dev/null and b/sim/resources/stompymicro/meshes/right_shoulder_yaw.stl differ
diff --git a/sim/resources/stompymicro/meshes/right_shoulder_yaw_motor.stl b/sim/resources/stompymicro/meshes/right_shoulder_yaw_motor.stl
new file mode 100644
index 00000000..7544f2ef
Binary files /dev/null and b/sim/resources/stompymicro/meshes/right_shoulder_yaw_motor.stl differ
diff --git a/sim/resources/stompymicro/robot.urdf b/sim/resources/stompymicro/robot.urdf
index f53f5de4..b100b953 100644
--- a/sim/resources/stompymicro/robot.urdf
+++ b/sim/resources/stompymicro/robot.urdf
[XML element content of the following hunks is not recoverable; only the hunk locations remain:
@@ -122,7 +122,7 @@  @@ -151,7 +151,7 @@  @@ -209,10 +209,10 @@  @@ -238,10 +238,10 @@  @@ -270,7 +270,7 @@  @@ -299,7 +299,7 @@  @@ -325,26 +325,26 @@  @@ -354,26 +354,26 @@  @@ -382,27 +382,27 @@  @@ -411,27 +411,27 @@  @@ -441,8 +441,8 @@  @@ -470,8 +470,8 @@]
diff --git a/sim/resources/stompymicro/robot_fixed.urdf b/sim/resources/stompymicro/robot_fixed.urdf
index 90bd61c3..821346e2 100644
--- a/sim/resources/stompymicro/robot_fixed.urdf
+++ b/sim/resources/stompymicro/robot_fixed.urdf
@@ -1,500 +1,501 @@
[XML element content of this hunk is not recoverable; the file was rewritten from 500 to 501 lines]
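---

The patch adds a PyTorch-to-ONNX export path (`sim/genesis/utils/convert_to_onnx.py`) and a standalone script (`analyze_onnx.py`) that only inspects the ONNX side. A quick way to check the export end to end is to run the same random observation through both the checkpoint actor and the exported ONNX graph and compare the resulting actions. The sketch below is illustrative and not part of the patch; it assumes the default paths used elsewhere in this PR (`sim/genesis/logs/zeroth-walking/cfgs.pkl`, `examples/model_100.pt`, `examples/model_100.onnx`), the `cfgs.pkl` layout read by `convert_to_onnx.py`, and that it is run from the repository root.

```python
# Minimal consistency check between the PyTorch checkpoint and the ONNX export.
# Paths and the cfgs.pkl layout follow convert_to_onnx.py; adjust them to your setup.
import pickle

import numpy as np
import onnxruntime as ort
import torch
from rsl_rl.modules import ActorCritic

with open("sim/genesis/logs/zeroth-walking/cfgs.pkl", "rb") as f:
    cfgs = pickle.load(f)  # [env_cfg, obs_cfg, reward_cfg, command_cfg, train_cfg]

obs_dim = cfgs[1]["num_single_obs"]
policy_cfg = cfgs[4]["policy"]

# Rebuild the actor exactly as convert_to_onnx.py does and load the checkpoint.
model = ActorCritic(
    num_actor_obs=obs_dim,
    num_critic_obs=obs_dim,
    num_actions=cfgs[0]["num_actions"],
    actor_hidden_dims=policy_cfg["actor_hidden_dims"],
    critic_hidden_dims=policy_cfg["critic_hidden_dims"],
    activation="elu",
    init_noise_std=policy_cfg["init_noise_std"],
)
state = torch.load("examples/model_100.pt", weights_only=True)
model.load_state_dict(state["model_state_dict"])
model.eval()

# Feed the same random observation to both backends.
obs = np.random.randn(1, obs_dim).astype(np.float32)
with torch.no_grad():
    torch_actions = model.actor(torch.from_numpy(obs)).numpy()

session = ort.InferenceSession("examples/model_100.onnx")
input_name = session.get_inputs()[0].name
onnx_actions = session.run(None, {input_name: obs})[0]

print("max abs diff:", np.abs(torch_actions - onnx_actions).max())
assert np.allclose(torch_actions, onnx_actions, atol=1e-4)
```

If the assertion holds, any mismatch seen at deployment is more likely to come from observation scaling or normalization (the training config enables `empirical_normalization`) than from the export itself.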