GAC: new implementation and jerk changes (commaai#95)
* GAC: Harald's method

* unused

* match harald's

* Revert "match harald's"

This reverts commit f2015eb2d8ffe80f8e02ffb92bd1d244dbfdb38c.

* try this

* Revert "try this"

This reverts commit 993adce26f594c5d2da1752657fe3af21c17cd27.

* do this instead

* fix

* cleanup

* align with harald's method

* fix init at 0

* use cereal

* unused

* maniac, aggro, stock

* unused

* fake lead when no lead

* print desired following distance from lead_0

* default at 3

* update carstate obj

* clip this

* use actual profiles

* cleanup

* scale maniac text smaller

* should be 1
sunnyhaibin committed Jun 8, 2023
1 parent 780c831 commit 869573d
Showing 3 changed files with 50 additions and 39 deletions.
69 changes: 35 additions & 34 deletions selfdrive/controls/lib/longitudinal_mpc_lib/long_mpc.py
@@ -1,9 +1,9 @@
#!/usr/bin/env python3
import os
import numpy as np

from cereal import log
from common.realtime import sec_since_boot
from common.numpy_fast import clip, interp
from common.numpy_fast import clip
from system.swaglog import cloudlog
# WARNING: imports outside of constants will not trigger a rebuild
from selfdrive.modeld.constants import index_function
@@ -54,17 +54,37 @@
T_DIFFS = np.diff(T_IDXS, prepend=[0.])
MIN_ACCEL = -3.5
MAX_ACCEL = 2.0
T_FOLLOW = 1.45
COMFORT_BRAKE = 2.5
STOP_DISTANCE = 6.0

def get_jerk_factor(personality=log.LongitudinalPersonality.standard):
if personality == log.LongitudinalPersonality.standard:
return 1.0
elif personality == log.LongitudinalPersonality.moderate:
return 0.5
elif personality == log.LongitudinalPersonality.aggressive:
return 0.222
else:
raise NotImplementedError("Longitudinal personality not supported")


def get_T_FOLLOW(personality=log.LongitudinalPersonality.standard):
if personality == log.LongitudinalPersonality.standard:
return 1.45
elif personality == log.LongitudinalPersonality.moderate:
return 1.25
elif personality == log.LongitudinalPersonality.aggressive:
return 1.0
else:
raise NotImplementedError("Longitudinal personality not supported")

def get_stopped_equivalence_factor(v_lead):
return (v_lead**2) / (2 * COMFORT_BRAKE)

def get_safe_obstacle_distance(v_ego, t_follow=T_FOLLOW):
def get_safe_obstacle_distance(v_ego, t_follow):
return (v_ego**2) / (2 * COMFORT_BRAKE) + t_follow * v_ego + STOP_DISTANCE

def desired_follow_distance(v_ego, v_lead, t_follow=T_FOLLOW):
def desired_follow_distance(v_ego, v_lead, t_follow=get_T_FOLLOW()):
return get_safe_obstacle_distance(v_ego, t_follow) - get_stopped_equivalence_factor(v_lead)
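
For a quick sanity check of what the three profiles produce, here is a standalone sketch using the helpers above (the 10 m/s speed is illustrative only, and it assumes an openpilot environment where this module and the fork's cereal schema import cleanly):

```python
from cereal import log
from selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import (
  get_jerk_factor, get_T_FOLLOW, desired_follow_distance)

v_ego = v_lead = 10.0  # m/s, illustrative only
for p in (log.LongitudinalPersonality.standard,
          log.LongitudinalPersonality.moderate,
          log.LongitudinalPersonality.aggressive):
  # With v_ego == v_lead the stopped-equivalence terms cancel, so the desired
  # gap reduces to t_follow * v_ego + STOP_DISTANCE.
  print(get_jerk_factor(p), get_T_FOLLOW(p),
        desired_follow_distance(v_ego, v_lead, get_T_FOLLOW(p)))
# standard:   1.0,   1.45 -> 20.5 m
# moderate:   0.5,   1.25 -> 18.5 m
# aggressive: 0.222, 1.00 -> 16.0 m
```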


@@ -161,7 +181,7 @@ def gen_long_ocp():

x0 = np.zeros(X_DIM)
ocp.constraints.x0 = x0
ocp.parameter_values = np.array([-1.2, 1.2, 0.0, 0.0, T_FOLLOW, LEAD_DANGER_FACTOR])
ocp.parameter_values = np.array([-1.2, 1.2, 0.0, 0.0, get_T_FOLLOW(), LEAD_DANGER_FACTOR])

# We put all constraint cost weights to 0 and only set them at runtime
cost_weights = np.zeros(CONSTR_DIM)
@@ -201,7 +221,7 @@ class LongitudinalMpc:
def __init__(self, mode='acc'):
self.mode = mode
self.solver = AcadosOcpSolverCython(MODEL_NAME, ACADOS_SOLVER_TYPE, N)
self.desired_TF = T_FOLLOW
self.desired_TF = get_T_FOLLOW()
self.reset()
self.source = SOURCES[2]

@@ -252,21 +272,12 @@ def set_cost_weights(self, cost_weights, constraint_cost_weights):
for i in range(N):
self.solver.cost_set(i, 'Zl', Zl)

def get_cost_multipliers(self):
TFs = [1.0, 1.25, T_FOLLOW]
# KRKeegan adjustments to costs for different TFs
# these were calculated using the test_longitudinal.py deceleration tests
a_change_tf = interp(self.desired_TF, TFs, [.1, .8, 1.])
j_ego_tf = interp(self.desired_TF, TFs, [.6, .8, 1.])
d_zone_tf = interp(self.desired_TF, TFs, [1.6, 1.3, 1.])
return a_change_tf, j_ego_tf, d_zone_tf

def set_weights(self, prev_accel_constraint=True):
def set_weights(self, prev_accel_constraint=True, personality=log.LongitudinalPersonality.standard):
jerk_factor = get_jerk_factor(personality)
if self.mode == 'acc':
cost_multipliers = self.get_cost_multipliers()
a_change_cost = A_CHANGE_COST if prev_accel_constraint else 0
cost_weights = [X_EGO_OBSTACLE_COST, X_EGO_COST, V_EGO_COST, A_EGO_COST, a_change_cost * cost_multipliers[0], J_EGO_COST * cost_multipliers[1]]
constraint_cost_weights = [LIMIT_COST, LIMIT_COST, LIMIT_COST, DANGER_ZONE_COST * cost_multipliers[2]]
cost_weights = [X_EGO_OBSTACLE_COST, X_EGO_COST, V_EGO_COST, A_EGO_COST, jerk_factor * a_change_cost, jerk_factor * J_EGO_COST]
constraint_cost_weights = [LIMIT_COST, LIMIT_COST, LIMIT_COST, DANGER_ZONE_COST]
elif self.mode == 'blended':
a_change_cost = 40.0 if prev_accel_constraint else 0
cost_weights = [0., 0.1, 0.2, 5.0, a_change_cost, 1.0]
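
In 'acc' mode the new jerk_factor is simply a per-personality scaling of the acceleration-change and jerk costs, replacing the old follow-distance interp() multipliers. A minimal sketch of that scaling (the two cost constants below are placeholders for illustration, not the repo's actual values):

```python
A_CHANGE_COST = 50.0  # placeholder, not the repo's actual constant
J_EGO_COST = 5.0      # placeholder, not the repo's actual constant

for name, jerk_factor in (("standard", 1.0), ("moderate", 0.5), ("aggressive", 0.222)):
  # Higher jerk_factor -> larger penalty on accel changes and jerk -> smoother,
  # less eager longitudinal behavior.
  print(name, jerk_factor * A_CHANGE_COST, jerk_factor * J_EGO_COST)
```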
@@ -320,24 +331,14 @@ def set_accel_limits(self, min_a, max_a):
self.cruise_min_a = min_a
self.max_a = max_a

def update_TF(self, carstate):
gac_tr = carstate.gapAdjustCruiseTr
if gac_tr == 1:
self.desired_TF = 1.0
elif gac_tr == 2:
self.desired_TF = 1.25
else:
self.desired_TF = T_FOLLOW

def update(self, carstate, radarstate, v_cruise, x, v, a, j):
def update(self, radarstate, v_cruise, x, v, a, j, personality=log.LongitudinalPersonality.standard):
self.desired_TF = get_T_FOLLOW(personality)
v_ego = self.x0[1]
self.status = radarstate.leadOne.status or radarstate.leadTwo.status

lead_xv_0 = self.process_lead(radarstate.leadOne)
lead_xv_1 = self.process_lead(radarstate.leadTwo)

self.update_TF(carstate)

# To estimate a safe distance from a moving lead, we calculate how much stopping
# distance that lead needs as a minimum. We can add that to the current distance
# and then treat that as a stopped car/obstacle at this new distance.
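
A worked example of this stopped-equivalence idea, using the constants defined earlier in this file (speeds and distance are illustrative):

```python
COMFORT_BRAKE = 2.5
STOP_DISTANCE = 6.0
t_follow = 1.45                        # standard profile
v_ego, v_lead, x_lead = 15.0, 15.0, 40.0

# Moving lead -> equivalent stopped obstacle: add the lead's minimum stopping distance.
lead_obstacle = x_lead + v_lead**2 / (2 * COMFORT_BRAKE)        # 40 + 45 = 85 m
# Ego's required margin behind that virtual stopped obstacle.
safe_distance = (v_ego**2 / (2 * COMFORT_BRAKE)
                 + t_follow * v_ego + STOP_DISTANCE)            # 45 + 21.75 + 6 = 72.75 m
# The MPC keeps ego's travelled distance below lead_obstacle - safe_distance (12.25 m here).
```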
@@ -413,9 +414,9 @@ def update(self, carstate, radarstate, v_cruise, x, v, a, j):
# Check if it got within lead comfort range
# TODO This should be done cleaner
if self.mode == 'blended':
if any((lead_0_obstacle - get_safe_obstacle_distance(self.x_sol[:,1], T_FOLLOW))- self.x_sol[:,0] < 0.0):
if any((lead_0_obstacle - get_safe_obstacle_distance(self.x_sol[:,1], self.desired_TF))- self.x_sol[:,0] < 0.0):
self.source = 'lead0'
if any((lead_1_obstacle - get_safe_obstacle_distance(self.x_sol[:,1], T_FOLLOW))- self.x_sol[:,0] < 0.0) and \
if any((lead_1_obstacle - get_safe_obstacle_distance(self.x_sol[:,1], self.desired_TF))- self.x_sol[:,0] < 0.0) and \
(lead_1_obstacle[0] - lead_0_obstacle[0]):
self.source = 'lead1'

14 changes: 12 additions & 2 deletions selfdrive/controls/lib/longitudinal_planner.py
@@ -2,6 +2,7 @@
import math
import numpy as np
from common.numpy_fast import clip, interp
from cereal import log

import cereal.messaging as messaging
from cereal import car
@@ -71,6 +72,13 @@ def __init__(self, CP, init_v=0.0, init_a=0.0):
self.events = Events()
self.turn_speed_controller = TurnSpeedController()

self.read_gac(gac_tr=3)
self.personality = log.LongitudinalPersonality.standard

def read_gac(self, gac_tr=3):
gac_tr = clip(gac_tr, 1, 3)
self.personality = int(gac_tr - 1)
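
In other words, the 1/2/3 button value is clamped and shifted to a 0-based personality value. A small sketch of that mapping (gac_to_personality is an illustrative helper, not part of this diff; the gap names in the comment are the button labels from the onroad.cc change below):

```python
from common.numpy_fast import clip

def gac_to_personality(gac_tr):
  # Mirror read_gac(): clamp the button value to 1..3, then shift to 0-based.
  return int(clip(gac_tr, 1, 3) - 1)

for raw in (0, 1, 2, 3, 7):
  print(raw, "->", gac_to_personality(raw))
# 0 -> 0, 1 -> 0 (Maniac), 2 -> 1 (Aggro), 3 -> 2 (Stock), 7 -> 2
```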

@staticmethod
def parse_model(model_msg, model_error):
if (len(model_msg.position.x) == 33 and
@@ -88,6 +96,7 @@ def parse_model(model_msg, model_error):
return x, v, a, j

def update(self, sm):
self.read_gac(gac_tr=sm['carState'].gapAdjustCruiseTr)
self.mpc.mode = 'blended' if sm['controlsState'].experimentalMode else 'acc'

v_ego = sm['carState'].vEgo
@@ -133,11 +142,11 @@ def update(self, sm):
accel_limits_turns[0] = min(accel_limits_turns[0], self.a_desired + 0.05, a_min_sol)
accel_limits_turns[1] = max(accel_limits_turns[1], self.a_desired - 0.05)

self.mpc.set_weights(prev_accel_constraint)
self.mpc.set_weights(prev_accel_constraint, personality=self.personality)
self.mpc.set_accel_limits(accel_limits_turns[0], accel_limits_turns[1])
self.mpc.set_cur_state(self.v_desired_filter.x, self.a_desired)
x, v, a, j = self.parse_model(sm['modelV2'], self.v_model_error)
self.mpc.update(sm['carState'], sm['radarState'], v_cruise_sol, x, v, a, j)
self.mpc.update(sm['radarState'], v_cruise_sol, x, v, a, j, personality=self.personality)

self.v_desired_trajectory_full = np.interp(T_IDXS, T_IDXS_MPC, self.mpc.v_solution)
self.a_desired_trajectory_full = np.interp(T_IDXS, T_IDXS_MPC, self.mpc.a_solution)
@@ -175,6 +184,7 @@ def publish(self, sm, pm):
longitudinalPlan.fcw = self.fcw

longitudinalPlan.solverExecutionTime = self.mpc.solve_time
longitudinalPlan.personality = self.personality

longitudinalPlan.e2eX = self.mpc.e2e_x.tolist()
longitudinalPlan.desiredTF = self.mpc.desired_TF
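
A minimal downstream consumer of the two new fields might look like this (a sketch; it assumes the fork's cereal schema exposes personality and desiredTF on longitudinalPlan as published above):

```python
import cereal.messaging as messaging

sm = messaging.SubMaster(['longitudinalPlan'])
while True:
  sm.update()
  if sm.updated['longitudinalPlan']:
    plan = sm['longitudinalPlan']
    print("personality:", plan.personality, "desired follow time:", plan.desiredTF)
```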
6 changes: 3 additions & 3 deletions selfdrive/ui/qt/onroad.cc
@@ -973,10 +973,10 @@ void AnnotatedCameraWidget::drawGacButton(QPainter &p, int x, int y, int w, int
if (prev_gac_tr != gacTr) {
prev_gac_tr = gacTr;
if (gacTr == 1) {
gac_text = "Aggro\nGap";
gac_text = "Maniac\nGap";
gac_border = QColor(255, 75, 75, (255 * btnPerc));
} else if (gacTr == 2) {
gac_text = "Mild\nGap";
gac_text = "Aggro\nGap";
gac_border = QColor(252, 255, 75, (255 * btnPerc));
} else {
gac_text = "Stock\nGap";
@@ -989,7 +989,7 @@ void AnnotatedCameraWidget::drawGacButton(QPainter &p, int x, int y, int w, int
p.setBrush(QColor(75, 75, 75, (75 * btnPerc)));
p.drawEllipse(gacBtn);
p.setPen(QColor(255, 255, 255, (255 * btnPerc)));
configFont(p, "Inter", 36, "SemiBold");
configFont(p, "Inter", gacTr == 1 ? 32 : 36, "SemiBold");
p.drawText(gacBtn, Qt::AlignCenter, gac_text);
}

