def set_weights_for_lead_policy(self):
    """Install the MPC stage cost-weight matrix W for lead-following.

    The jerk (J_EGO) and acceleration-change (A_CHANGE) penalties are ramped
    down at very low ego speed (0.1x at 0.5 m/s up to 1.0x at 1.0 m/s) so the
    solver is less stiff while creeping / coming to a stop, then the same W is
    set on every stage of the horizon with a per-stage fade of the
    accel-change cost.

    NOTE(review): reconstructed from a diff hunk; the original function may
    continue past the last visible statement — confirm against the full file.
    """
    # Multipliers currently fixed at 1; the previous TR-dependent interps are
    # kept as reference in the trailing comments.
    x_ego_obstacle_cost_multiplier = 1  # interp(self.desired_TR, TRs, [3., 1.0, 0.1])
    j_ego_cost_multiplier = 1  # interp(self.desired_TR, TRs, [0.5, 1.0, 1.0])
    d_zone_cost_multiplier = 1  # interp(self.desired_TR, TRs, [4., 1.0, 1.0])

    # Relax jerk and accel-change penalties below ~1 m/s for smoother stops.
    _J_EGO_COST = J_EGO_COST * interp(self.v_ego, [0.5, 1.0], [0.1, 1.0])
    _A_CHANGE_COST = A_CHANGE_COST * interp(self.v_ego, [0.5, 1.0], [0.1, 1.0])

    # Diagonal weights: [x_obstacle, x, v, a, a_change, j]; Fortran order is
    # required by the acados solver interface.
    W = np.asfortranarray(np.diag([X_EGO_OBSTACLE_COST * x_ego_obstacle_cost_multiplier,
                                   X_EGO_COST, V_EGO_COST, A_EGO_COST,
                                   _A_CHANGE_COST, _J_EGO_COST]))
    for i in range(N):
        # Fade the accel-change cost to zero over the later horizon stages.
        # FIX: use the speed-scaled _A_CHANGE_COST here. The original loop used
        # the unscaled A_CHANGE_COST, which overwrote W[4, 4] and silently
        # discarded the low-speed scaling applied when W was built above.
        W[4, 4] = _A_CHANGE_COST * np.interp(T_IDXS[i], [0.0, 1.0, 2.0], [1.0, 1.0, 0.0])
        self.solver.cost_set(i, 'W', W)