新增 `DistractionDetectionLevel` 参数以控制驾驶员分心检测的灵敏度等级,并在 `dmonitoringd.py` 和 `helpers.py` 中实现不同等级对应的时间阈值配置。同时更新了相关逻辑以支持动态调整该参数。
fix(toyota): 支持 Toyota Wildlander PHEV 车型接入与控制。增加对 Toyota Wildlander PHEV 的指纹识别、车辆规格定义及接口适配,确保其在 TSS2 平台下的正常运行,并修正部分雷达ACC判断条件。
feat(ui): 优化 Dragonpilot 设置界面选项显示语言一致性。将 Dragonpilot 设置页面中的多个下拉选项文本进行国际化处理,统一使用翻译函数包裹,提升多语言兼容性。
chore(config): 更新 launch 脚本 API 地址并切换 shell 解释器。修改 `launch_openpilot.sh` 使用 `/usr/bin/bash` 作为解释器,并设置自定义 API 与 Athena 服务地址。
refactor(key): 实现 ECU 秘钥提取脚本并写入参数存储。创建 `key.py` 脚本用于通过 UDS 协议从 ECU 提取 SecOC 密钥,并将其保存至系统参数中供后续使用。
docs(vscode): 移除不再使用的终端配置项。清理 `.vscode/settings.json` 文件中过时的 terminal 配置内容。
feat(fonts): 新增中文字体资源文件。添加 `china.ttf` 字体文件以增强 UI 在中文环境下的渲染效果。
build(payload): 添加二进制负载文件。引入新的二进制 payload 文件用于辅助密钥提取流程。
304 lines
12 KiB
Python
304 lines
12 KiB
Python
import os
|
|
import time
|
|
import numpy as np
|
|
import pyray as rl
|
|
from cereal import log, messaging
|
|
from msgq.visionipc import VisionStreamType
|
|
from openpilot.selfdrive.ui.ui_state import ui_state, UIStatus, UI_BORDER_SIZE
|
|
from openpilot.selfdrive.ui.onroad.alert_renderer import AlertRenderer
|
|
from openpilot.selfdrive.ui.onroad.driver_state import DriverStateRenderer
|
|
from openpilot.selfdrive.ui.onroad.hud_renderer import HudRenderer
|
|
from openpilot.selfdrive.ui.onroad.model_renderer import ModelRenderer
|
|
from openpilot.selfdrive.ui.onroad.cameraview import CameraView
|
|
from openpilot.system.ui.lib.application import gui_app
|
|
from openpilot.common.transformations.camera import DEVICE_CAMERAS, DeviceCameraConfig, view_frame_from_device_frame
|
|
from openpilot.common.transformations.orientation import rot_from_euler
|
|
|
|
# Short aliases for cereal enums used throughout this module
OpState = log.SelfdriveState.OpenpilotState
CALIBRATED = log.LiveCalibrationData.Status.calibrated

# Vision stream aliases for the narrow road camera and the wide road camera
ROAD_CAM = VisionStreamType.VISION_STREAM_ROAD
WIDE_CAM = VisionStreamType.VISION_STREAM_WIDE_ROAD

# Fallback camera config used until the actual device camera has been identified
# from roadCameraState/deviceState messages (see AugmentedRoadView._update_calibration)
DEFAULT_DEVICE_CAMERA = DEVICE_CAMERAS["tici", "ar0231"]

# Border color drawn around the camera view for each UI status
BORDER_COLORS = {
  UIStatus.DISENGAGED: rl.Color(0x12, 0x28, 0x39, 0xFF),  # Blue for disengaged state
  UIStatus.OVERRIDE: rl.Color(0x89, 0x92, 0x8D, 0xFF),  # Gray for override state
  UIStatus.ENGAGED: rl.Color(0x16, 0x7F, 0x40, 0xFF),  # Green for engaged state
  UIStatus.ALKA: rl.Color(0x22, 0xa0, 0xdc, 0xf1),  # Blue for ALKA state
}

# Speed thresholds for auto-switching between wide and road cameras in
# experimental mode; the gap between them forms a hysteresis band
WIDE_CAM_MAX_SPEED = 10.0  # m/s (22 mph)
ROAD_CAM_MIN_SPEED = 15.0  # m/s (34 mph)

# dp - side border indicator blink periods (in frames) and colors
DP_INDICATOR_BLINK_RATE_FAST = int(gui_app.target_fps * 0.25)
DP_INDICATOR_BLINK_RATE_STD = int(gui_app.target_fps * 0.5)
DP_INDICATOR_COLOR_BSM = rl.Color(255, 255, 0, 255)  # yellow: blind spot warning
DP_INDICATOR_COLOR_BLINKER = rl.Color(0, 255, 0, 255)  # green: turn signal
|
|
|
class AugmentedRoadView(CameraView):
  """Road camera view augmented with the openpilot UI overlays.

  Renders the camerad road (or wide road) stream, draws the model path, HUD,
  alerts and driver-state overlays inside a scissor-clipped content area, and
  frames everything with a colored border reflecting the engagement status.
  Also publishes per-frame draw times on the 'uiDebug' channel.
  """

  def __init__(self, stream_type: VisionStreamType = VisionStreamType.VISION_STREAM_ROAD):
    super().__init__("camerad", stream_type)
    self._set_placeholder_color(BORDER_COLORS[UIStatus.DISENGAGED])

    # Camera/calibration state; device_camera stays None until both
    # roadCameraState and deviceState have been seen (_update_calibration)
    self.device_camera: DeviceCameraConfig | None = None
    self.view_from_calib = view_frame_from_device_frame.copy()
    self.view_from_wide_calib = view_frame_from_device_frame.copy()

    # Cache keys + cached result for the transform built in _calc_frame_matrix
    self._last_calib_time: float = 0
    self._last_rect_dims = (0.0, 0.0)
    self._last_stream_type = stream_type
    self._cached_matrix: np.ndarray | None = None
    self._content_rect = rl.Rectangle()

    # Overlay renderers drawn on top of the camera frame each render pass
    self.model_renderer = ModelRenderer()
    self._hud_renderer = HudRenderer()
    self.alert_renderer = AlertRenderer()
    self.driver_state_renderer = DriverStateRenderer()

    # DP border indicator: per-side visibility, frame counter (drives the
    # blink phase) and current color for the left/right edge strips
    self._dp_indicator_show_left = False
    self._dp_indicator_show_right = False
    self._dp_indicator_count_left = 0
    self._dp_indicator_count_right = 0
    self._dp_indicator_color_left = rl.Color(0, 0, 0, 0)
    self._dp_indicator_color_right = rl.Color(0, 0, 0, 0)

    # debug: publisher for per-frame draw-time measurements
    self._pm = messaging.PubMaster(['uiDebug'])

  def _render(self, rect):
    """Render one frame: camera image, clipped overlays, border, debug timing."""
    # Only render when system is started to avoid invalid data access
    start_draw = time.monotonic()
    if not ui_state.started:
      return

    self._update_dp_indicator_states(ui_state.sm)
    self._switch_stream_if_needed(ui_state.sm)

    # Update calibration before rendering
    self._update_calibration()

    # Create inner content area with border padding
    self._content_rect = rl.Rectangle(
      rect.x + UI_BORDER_SIZE,
      rect.y + UI_BORDER_SIZE,
      rect.width - 2 * UI_BORDER_SIZE,
      rect.height - 2 * UI_BORDER_SIZE,
    )

    # Enable scissor mode to clip all rendering within content rectangle boundaries
    # This creates a rendering viewport that prevents graphics from drawing outside the border
    rl.begin_scissor_mode(
      int(self._content_rect.x),
      int(self._content_rect.y),
      int(self._content_rect.width),
      int(self._content_rect.height)
    )

    # Render the base camera view
    super()._render(rect)

    # dp: optionally hide the HUD above a configured speed (0 disables hiding)
    hide_hud = False
    if ui_state.dp_ui_hide_hud_speed_ms > 0. and ui_state.sm['carState'].vEgo > ui_state.dp_ui_hide_hud_speed_ms:
      hide_hud = True

    # Draw all UI overlays
    self.model_renderer.render(self._content_rect)
    if not hide_hud:
      self._hud_renderer.render(self._content_rect)
    self.alert_renderer.render(self._content_rect)
    # "LITE" env var disables the driver-state overlay entirely
    if "LITE" not in os.environ:
      if not hide_hud:
        self.driver_state_renderer.render(self._content_rect)

    # Custom UI extension point - add custom overlays here
    # Use self._content_rect for positioning within camera bounds

    # End clipping region
    rl.end_scissor_mode()

    # Draw colored border based on driving state
    self._draw_border(rect)

    # publish uiDebug
    msg = messaging.new_message('uiDebug')
    msg.uiDebug.drawTimeMillis = (time.monotonic() - start_draw) * 1000
    self._pm.send('uiDebug', msg)

  def _handle_mouse_press(self, _):
    # Forward the click to the registered callback unless the HUD is
    # currently consuming the interaction
    if not self._hud_renderer.user_interacting() and self._click_callback is not None:
      self._click_callback()

  def _handle_mouse_release(self, _):
    # We only call click callback on press if not interacting with HUD
    pass

  def _draw_border(self, rect: rl.Rectangle):
    """Draw the status-colored rounded border plus dp side indicator strips."""
    rl.draw_rectangle_lines_ex(rect, UI_BORDER_SIZE, rl.BLACK)
    border_roundness = 0.12
    border_color = BORDER_COLORS.get(ui_state.status, BORDER_COLORS[UIStatus.DISENGAGED])
    # dp: show the ALKA color when ALKA is active while openpilot is disengaged
    if ui_state.dp_alka_active and ui_state.status == UIStatus.DISENGAGED:
      border_color = BORDER_COLORS[UIStatus.ALKA]
    border_rect = rl.Rectangle(rect.x + UI_BORDER_SIZE, rect.y + UI_BORDER_SIZE,
                               rect.width - 2 * UI_BORDER_SIZE, rect.height - 2 * UI_BORDER_SIZE)
    rl.draw_rectangle_rounded_lines_ex(border_rect, border_roundness, 10, UI_BORDER_SIZE, border_color)

    # dp - Side indicators (vertical strips inset from the top/bottom corners)
    indicator_y = int(rect.y+4*UI_BORDER_SIZE)
    indicator_height = int(rect.height-8*UI_BORDER_SIZE)
    if self._dp_indicator_show_left:
      rl.draw_rectangle(int(rect.x), indicator_y, UI_BORDER_SIZE, indicator_height, self._dp_indicator_color_left)
    if self._dp_indicator_show_right:
      rl.draw_rectangle(int(rect.x + rect.width-UI_BORDER_SIZE), indicator_y, UI_BORDER_SIZE, indicator_height, self._dp_indicator_color_right)

  def _switch_stream_if_needed(self, sm):
    """Pick wide vs. road camera based on experimental mode and ego speed.

    In experimental mode (when the wide stream is available) the wide camera
    is used at low speed and the road camera at high speed, with a hysteresis
    band in between to avoid flapping. Outside experimental mode the road
    camera is always used.
    """
    if sm['selfdriveState'].experimentalMode and WIDE_CAM in self.available_streams:
      v_ego = sm['carState'].vEgo
      if v_ego < WIDE_CAM_MAX_SPEED:
        target = WIDE_CAM
      elif v_ego > ROAD_CAM_MIN_SPEED:
        target = ROAD_CAM
      else:
        # Hysteresis zone - keep current stream
        target = self.stream_type
    else:
      target = ROAD_CAM

    if self.stream_type != target:
      self.switch_stream(target)

  def _update_calibration(self):
    """Refresh view_from_calib / view_from_wide_calib from liveCalibration."""
    # Update device camera if not already set
    sm = ui_state.sm
    if not self.device_camera and sm.seen['roadCameraState'] and sm.seen['deviceState']:
      self.device_camera = DEVICE_CAMERAS[(str(sm['deviceState'].deviceType), str(sm['roadCameraState'].sensor))]

    # Check if live calibration data is available and valid
    if not (sm.updated["liveCalibration"] and sm.valid['liveCalibration']):
      return

    calib = sm['liveCalibration']
    if len(calib.rpyCalib) != 3 or calib.calStatus != CALIBRATED:
      return

    # Update view_from_calib matrix
    device_from_calib = rot_from_euler(calib.rpyCalib)
    self.view_from_calib = view_frame_from_device_frame @ device_from_calib

    # Update wide calibration if available
    if hasattr(calib, 'wideFromDeviceEuler') and len(calib.wideFromDeviceEuler) == 3:
      wide_from_device = rot_from_euler(calib.wideFromDeviceEuler)
      self.view_from_wide_calib = view_frame_from_device_frame @ wide_from_device @ device_from_calib

  def _calc_frame_matrix(self, rect: rl.Rectangle) -> np.ndarray:
    """Compute (and cache) the 3x3 frame transform for the current stream.

    The matrix recenters and zooms the camera image around the road's
    vanishing point. It is recomputed only when the calibration frame, the
    content rect dimensions, or the active stream type changed. As a side
    effect, the model renderer's screen transform is also updated.
    """
    # Check if we can use cached matrix
    calib_time = ui_state.sm.recv_frame['liveCalibration']
    current_dims = (self._content_rect.width, self._content_rect.height)
    if (self._last_calib_time == calib_time and
        self._last_rect_dims == current_dims and
        self._last_stream_type == self.stream_type and
        self._cached_matrix is not None):
      return self._cached_matrix

    # Get camera configuration (wide camera uses ecam intrinsics and a larger zoom)
    device_camera = self.device_camera or DEFAULT_DEVICE_CAMERA
    is_wide_camera = self.stream_type == WIDE_CAM
    intrinsic = device_camera.ecam.intrinsics if is_wide_camera else device_camera.fcam.intrinsics
    calibration = self.view_from_wide_calib if is_wide_camera else self.view_from_calib
    zoom = 2.0 if is_wide_camera else 1.1

    # Calculate transforms for vanishing point: project a point far ahead
    # on the road (x = 1000 m) into the image plane
    inf_point = np.array([1000.0, 0.0, 0.0])
    calib_transform = intrinsic @ calibration
    kep = calib_transform @ inf_point

    # Calculate center points and dimensions
    x, y = self._content_rect.x, self._content_rect.y
    w, h = self._content_rect.width, self._content_rect.height
    cx, cy = intrinsic[0, 2], intrinsic[1, 2]

    # Calculate max allowed offsets with margins
    margin = 5
    max_x_offset = cx * zoom - w / 2 - margin
    max_y_offset = cy * zoom - h / 2 - margin

    # Calculate and clamp offsets to prevent out-of-bounds issues
    try:
      if abs(kep[2]) > 1e-6:
        x_offset = np.clip((kep[0] / kep[2] - cx) * zoom, -max_x_offset, max_x_offset)
        y_offset = np.clip((kep[1] / kep[2] - cy) * zoom, -max_y_offset, max_y_offset)
      else:
        x_offset, y_offset = 0, 0
    except (ZeroDivisionError, OverflowError):
      x_offset, y_offset = 0, 0

    # Cache the computed transformation matrix to avoid recalculations
    self._last_calib_time = calib_time
    self._last_rect_dims = current_dims
    self._last_stream_type = self.stream_type
    self._cached_matrix = np.array([
      [zoom * 2 * cx / w, 0, -x_offset / w * 2],
      [0, zoom * 2 * cy / h, -y_offset / h * 2],
      [0, 0, 1.0]
    ])

    # Pixel-space transform for the model overlay (zoom + recentring in screen coords)
    video_transform = np.array([
      [zoom, 0.0, (w / 2 + x - x_offset) - (cx * zoom)],
      [0.0, zoom, (h / 2 + y - y_offset) - (cy * zoom)],
      [0.0, 0.0, 1.0]
    ])
    self.model_renderer.set_transform(video_transform @ calib_transform)

    return self._cached_matrix

  def _update_dp_indicator_side_state(self, blinker_state, bsm_state, show_prev, count_prev):
    """Compute next (show, count, color) for one side's border indicator.

    blinker_state: True while the turn signal on this side is on.
    bsm_state: True while the blind-spot monitor flags this side.
    show_prev/count_prev: previous visibility flag and frame counter.

    Returns (show, count, color): fast yellow blink for blinker+BSM,
    standard green blink for blinker only, solid yellow for BSM only,
    hidden (counter reset) when neither is active.
    """
    show = show_prev
    count = count_prev
    color = rl.Color(0, 0, 0, 0)

    if not blinker_state and not bsm_state:
      show = False
      count = 0
    else:
      count += 1

      # Toggle visibility whenever the counter hits a multiple of the blink rate
      if bsm_state and blinker_state:
        show = not show if count % DP_INDICATOR_BLINK_RATE_FAST == 0 else show
        color = DP_INDICATOR_COLOR_BSM
      elif blinker_state:
        show = not show if count % DP_INDICATOR_BLINK_RATE_STD == 0 else show
        color = DP_INDICATOR_COLOR_BLINKER
      elif bsm_state:
        show = True
        color = DP_INDICATOR_COLOR_BSM
      else:
        show = False

    return show, count, color

  def _update_dp_indicator_states(self, sm):
    """Advance both side indicators from the latest carState blinker/blind-spot flags."""
    cs = sm['carState']
    self._dp_indicator_show_left, self._dp_indicator_count_left, self._dp_indicator_color_left = \
      self._update_dp_indicator_side_state(cs.leftBlinker, cs.leftBlindspot,
                                           self._dp_indicator_show_left, self._dp_indicator_count_left)
    self._dp_indicator_show_right, self._dp_indicator_count_right, self._dp_indicator_color_right = \
      self._update_dp_indicator_side_state(cs.rightBlinker, cs.rightBlindspot,
                                           self._dp_indicator_show_right, self._dp_indicator_count_right)
|
|
if __name__ == "__main__":
  # Standalone demo: render the augmented road view in its own window,
  # toggling between road and wide cameras on SPACE.
  gui_app.init_window("OnRoad Camera View")
  view = AugmentedRoadView(ROAD_CAM)
  print("***press space to switch camera view***")
  try:
    for _ in gui_app.render():
      ui_state.update()
      # Only switch on key release, and only when the wide stream exists
      if rl.is_key_released(rl.KeyboardKey.KEY_SPACE) and WIDE_CAM in view.available_streams:
        view.switch_stream(WIDE_CAM if view.stream_type != WIDE_CAM else ROAD_CAM)
      view.render(rl.Rectangle(0, 0, gui_app.width, gui_app.height))
  finally:
    # Always release camera/IPC resources, even if rendering raises
    view.close()