Fix cfg paths to run from the root perls2 directory.
danfeiX committed Feb 1, 2021
1 parent 396925a commit 0783d3e
Showing 7 changed files with 20 additions and 16 deletions.
4 changes: 2 additions & 2 deletions cfg/default_panda_controller.yaml
@@ -8,8 +8,8 @@ panda_controller:
damping: 1.0 #1 #0.5 #1 #0.5 # Damping factor [0,1]
input_max: 1.0
input_min: -1.0
-output_max: 0.05
-output_min: -0.05
+output_max: 0.1
+output_min: -0.1
JointVelocity:
kv: 10.0 #0.5
input_max: 1.0
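A minimal sketch of how input/output bounds like the ones above are commonly applied in impedance controllers (assumed behavior; the exact perls2 controller code may differ): the raw action in [input_min, input_max] is clipped and mapped linearly into [output_min, output_max], so raising output_max from 0.05 to 0.1 doubles the largest per-step command delta.

import numpy as np

def scale_action(action, input_min=-1.0, input_max=1.0,
                 output_min=-0.1, output_max=0.1):
    """Clip a raw action to the input range, then map it linearly into the
    output range used as the per-step command (assumed behavior)."""
    action = np.clip(action, input_min, input_max)
    scale = (output_max - output_min) / (input_max - input_min)
    return output_min + (action - input_min) * scale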
2 changes: 1 addition & 1 deletion demos/demo.py
@@ -34,7 +34,7 @@ def __init__(self,
ctrl_type,
demo_type,
test_fn,
config_file="demo_control_cfg.yaml",
config_file="demos/demo_control_cfg.yaml",
**kwargs):
self.ctrl_type = ctrl_type
self.config_file = config_file
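Why the prefix matters, sketched below (an explanation of relative-path resolution, not project code): a bare "demo_control_cfg.yaml" is resolved against the current working directory, so it only loads when the script is started from inside demos/; the "demos/" prefix makes the same relative path resolve when the demo is launched from the perls2 root.

import os

config_file = "demos/demo_control_cfg.yaml"
# The absolute path depends entirely on where the interpreter was started,
# which is why the default now assumes the perls2 root as the working directory.
print(os.path.abspath(config_file))
print(os.path.exists(config_file))  # True only when run from the perls2 root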
8 changes: 4 additions & 4 deletions demos/joint_demo.py
@@ -39,7 +39,7 @@ class JointSpaceDemo(Demo):
def __init__(self,
ctrl_type,
demo_type,
config_file="demo_control_cfg.yaml",
config_file="demos/demo_control_cfg.yaml",
delta_val=0.005, num_steps=30,
test_fn='set_joint_delta',
**kwargs):
@@ -208,7 +208,7 @@ class SingleJointDemo(JointSpaceDemo):
"""Joint Space demonstration for a single joint.
"""
def __init__(self,
config_file="demo_control_cfg.yaml",
config_file="demos/demo_control_cfg.yaml",
delta_val=0.005, num_steps=30,
test_fn='set_joint_delta',
joint_num=6,
@@ -248,7 +248,7 @@ class GravityCompDemo(JointSpaceDemo):
"""Gravity Compensation to demonstrate "floating" arm.
"""
def __init__(self,
config_file="demo_control_cfg.yaml",
config_file="demos/demo_control_cfg.yaml",
num_steps=100,
**kwargs):

@@ -280,7 +280,7 @@ def get_action(self, goal_state, current_state):

class JointImpDemoSeq(SingleJointDemo):
def __init__(self,
config_file="demo_control_cfg.yaml",
config_file="demos/demo_control_cfg.yaml",
delta_val=0.005, num_steps=30,
test_fn='set_joint_delta',
**kwargs):
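A hypothetical usage sketch (class and argument names are taken from the diff above; the import path and the placeholder path are assumptions): callers not launching from the perls2 root can still pass an explicit path instead of relying on the new default.

from demos.joint_demo import GravityCompDemo  # assumed import path

# Passing an explicit absolute path overrides the new default
# "demos/demo_control_cfg.yaml", which resolves only from the perls2 root.
demo = GravityCompDemo(config_file="/path/to/perls2/demos/demo_control_cfg.yaml",
                       num_steps=100)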
2 changes: 0 additions & 2 deletions examples/simple_reach/simple_reach_env.py
@@ -6,9 +6,7 @@
import math
import numpy as np
from perls2.utils.yaml_config import YamlConfig
-
from perls2.envs.env import Env
-
import gym.spaces as spaces
import logging

2 changes: 1 addition & 1 deletion examples/switch_sim_real_reach/run_switch_sim_real.py
@@ -30,7 +30,7 @@ def get_action(observation):
return action


-env = SwitchSimRealEnv('./sim_real_reach.yaml', True, None)
+env = SwitchSimRealEnv('examples/switch_sim_real_reach/sim_real_reach.yaml', True, None)

# Lists for saving demonstrations
training_list = []
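An alternative sketch, not part of this commit: anchoring the config path to the script's own location would let run_switch_sim_real.py work from any working directory, not only the perls2 root. The import line mirrors how the script presumably obtains SwitchSimRealEnv and is an assumption.

import os
from switch_sim_real_env import SwitchSimRealEnv  # assumed import, as used by the script

# Build the path relative to this file instead of the working directory.
CFG_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                        "sim_real_reach.yaml")
env = SwitchSimRealEnv(CFG_PATH, True, None)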
11 changes: 9 additions & 2 deletions examples/switch_sim_real_reach/sim_real_reach.yaml
@@ -1,13 +1,20 @@
# Cfg file for SimpleReachEnv
world:
type: 'Real'
-robot: 'sawyer'
+robot: 'panda'
controlType: 'EEImpedance'
data_dir: '../../data'
# Simulation parameters
sim_params:
time_step: 0.001 #0.004166 # 1./240.
steps_per_action: 20 # policy to sim update ratio
-MAX_STEPS: 10
+MAX_STEPS: 500
+
+policy_freq: 20 # For gazebo?
+control_freq: 500 # Hz
+
+# If set up has Kinect camera
+real_camera: False
+
# Robots are specified by types and urdf locations
# also may include intial setup like poses and orientations
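A minimal sketch of reading the updated config (plain PyYAML here as an assumption; the project itself imports a YamlConfig helper). Note that the diff extraction dropped indentation, so the nesting of the keys below is inferred from the layout above.

import yaml

with open("examples/switch_sim_real_reach/sim_real_reach.yaml") as f:
    cfg = yaml.safe_load(f)

# Nesting inferred: 'robot' under 'world', 'MAX_STEPS' under 'sim_params'.
print(cfg["world"]["robot"])           # now 'panda' instead of 'sawyer'
print(cfg["sim_params"]["MAX_STEPS"])  # raised from 10 to 500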
7 changes: 3 additions & 4 deletions examples/switch_sim_real_reach/switch_sim_real_env.py
@@ -1,6 +1,5 @@
import time
import math
-import pybullet
import numpy as np
from perls2.utils.yaml_config import YamlConfig
import logging
@@ -53,8 +52,8 @@ def __init__(self,
self._initial_ee_orn = self.robot_interface.ee_orientation

# For randomizing goal position in real world cases.
-self.goal_upper_bound = np.add(self.reset_position, [0.1, 0.0, 0])
-self.goal_lower_bound = np.add(self.reset_position, [-0.1, -0.0, 0])
+self.goal_upper_bound = np.add(self.reset_position, [0.2, 0.0, 0])
+self.goal_lower_bound = np.add(self.reset_position, [0.1, -0.0, 0])

def reset(self):
"""Reset the environment.
@@ -208,7 +207,7 @@ def _check_termination(self):
print("Done - success!")
return True
if (self.num_steps > self.MAX_STEPS):
print("Done - max steps reached.")
print("Done - max steps reached.\n Final dist to goal \t{}".format(abs_dist))
return True
else:
return False
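The widened bounds above feed the goal randomization mentioned in the in-code comment; a sketch of how such bounds are typically sampled (the sampling call and the reset pose are assumptions, not shown in this diff):

import numpy as np

reset_position = np.array([0.5, 0.0, 0.2])  # placeholder reset pose
goal_upper_bound = np.add(reset_position, [0.2, 0.0, 0])
goal_lower_bound = np.add(reset_position, [0.1, -0.0, 0])

# Draw a goal uniformly inside the box spanned by the two bounds.
goal = np.random.uniform(goal_lower_bound, goal_upper_bound)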
