From 71ab6240f2dd8132ab29d9251690b3f8b6da0517 Mon Sep 17 00:00:00 2001 From: Alex Maclean Date: Fri, 15 Dec 2023 19:43:58 +0000 Subject: [PATCH 01/63] stm32: Fix STM32G4 USB STM32G4 USB controller requires 8 or 16-bit access, not 32-bit Signed-off-by: Alex Maclean --- src/stm32/usbfs.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/stm32/usbfs.c b/src/stm32/usbfs.c index ad2e7b3eb..5385c956c 100644 --- a/src/stm32/usbfs.c +++ b/src/stm32/usbfs.c @@ -15,7 +15,7 @@ #include "internal.h" // GPIO #include "sched.h" // DECL_INIT -#if CONFIG_MACH_STM32F1 || CONFIG_MACH_STM32G4 +#if CONFIG_MACH_STM32F1 // Transfer memory is accessed with 32bits, but contains only 16bits of data typedef volatile uint32_t epmword_t; #define WSIZE 2 @@ -25,6 +25,11 @@ typedef volatile uint16_t epmword_t; #define WSIZE 2 #define USBx_IRQn USB_IRQn +#elif CONFIG_MACH_STM32G4 + // Transfer memory is accessed with 16bits and contains 16bits of data + typedef volatile uint16_t epmword_t; + #define WSIZE 2 + #define USBx_IRQn USB_LP_IRQn #elif CONFIG_MACH_STM32G0 // Transfer memory is accessed with 32bits and contains 32bits of data typedef volatile uint32_t epmword_t; From 147492b25357e486bea35fbeb57405dcc47e53aa Mon Sep 17 00:00:00 2001 From: Alex Maclean Date: Fri, 15 Dec 2023 22:49:07 +0000 Subject: [PATCH 02/63] stm32: Fix ADC for STM32G4 At least STM32G4 requires four ADC clock cycles between hardware clearing ADCCAL and setting ADEN or the write disappears. Make a tenacious write attempt. Signed-off-by: Alex Maclean --- src/stm32/stm32h7_adc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/stm32/stm32h7_adc.c b/src/stm32/stm32h7_adc.c index 57d4b15c7..e9dc8f845 100644 --- a/src/stm32/stm32h7_adc.c +++ b/src/stm32/stm32h7_adc.c @@ -240,9 +240,10 @@ gpio_adc_setup(uint32_t pin) // Enable ADC adc->ISR = ADC_ISR_ADRDY; adc->ISR; // Dummy read to make sure write is flushed - adc->CR |= ADC_CR_ADEN; + while (!(adc->CR & ADC_CR_ADEN)) + adc->CR |= ADC_CR_ADEN; while (!(adc->ISR & ADC_ISR_ADRDY)) - ; + ; // Set ADC clock cycles sample time for every channel uint32_t av = (aticks | (aticks << 3) | (aticks << 6) From 77619e912ca704977836485204238b17fed26b6b Mon Sep 17 00:00:00 2001 From: Alex Maclean Date: Mon, 18 Dec 2023 01:05:06 +0000 Subject: [PATCH 03/63] stm32: Fix CAN for STM32G4 Signed-off-by: Alex Maclean --- src/stm32/fdcan.c | 6 +++--- src/stm32/stm32g4.c | 3 +++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/stm32/fdcan.c b/src/stm32/fdcan.c index a1624f8c1..b0e8c01d1 100644 --- a/src/stm32/fdcan.c +++ b/src/stm32/fdcan.c @@ -162,10 +162,10 @@ canhw_set_filter(uint32_t id) can_filter(1, id); can_filter(2, id + 1); -#if CONFIG_MACH_STM32G0 +#if CONFIG_MACH_STM32G0 || CONFIG_MACH_STM32G4 SOC_CAN->RXGFC = ((id ? 3 : 1) << FDCAN_RXGFC_LSS_Pos | 0x02 << FDCAN_RXGFC_ANFS_Pos); -#elif CONFIG_MACH_STM32H7 || CONFIG_MAC_STM32G4 +#elif CONFIG_MACH_STM32H7 uint32_t flssa = (uint32_t)MSG_RAM.FLS - SRAMCAN_BASE; SOC_CAN->SIDFC = flssa | ((id ? 
3 : 1) << FDCAN_SIDFC_LSS_Pos); SOC_CAN->GFC = 0x02 << FDCAN_GFC_ANFS_Pos; @@ -293,7 +293,7 @@ can_init(void) SOC_CAN->NBTP = btr; -#if CONFIG_MACH_STM32H7 || CONFIG_MAC_STM32G4 +#if CONFIG_MACH_STM32H7 /* Setup message RAM addresses */ uint32_t f0sa = (uint32_t)MSG_RAM.RXF0 - SRAMCAN_BASE; SOC_CAN->RXF0C = f0sa | (ARRAY_SIZE(MSG_RAM.RXF0) << FDCAN_RXF0C_F0S_Pos); diff --git a/src/stm32/stm32g4.c b/src/stm32/stm32g4.c index aed9ed8fa..139ea8eaa 100644 --- a/src/stm32/stm32g4.c +++ b/src/stm32/stm32g4.c @@ -105,6 +105,9 @@ enable_clock_stm32g4(void) enable_pclock(CRS_BASE); CRS->CR |= CRS_CR_AUTOTRIMEN | CRS_CR_CEN; } + + // Use PCLK for FDCAN + RCC->CCIPR = 2 << RCC_CCIPR_FDCANSEL_Pos; } // Main clock setup called at chip startup From fe56bf36c920546fe27e21fa45be220ac764f67b Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Tue, 26 Dec 2023 11:18:40 -0500 Subject: [PATCH 04/63] toolhead: Fix _calc_print_time() after G4 and SET_PRESSURE_ADVANCE Commit b7b13588 changed the internal flush time tracking, but introduced the possibility of motion restart occurring too close to the last motion end in some rare cases. This could result in internal stepcompress errors. Track the last step generation flush time (last_sg_flush_time) and use when recalculating the next print_time. Signed-off-by: Kevin O'Connor --- klippy/toolhead.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/klippy/toolhead.py b/klippy/toolhead.py index 3e9b339ad..6e7b5a946 100644 --- a/klippy/toolhead.py +++ b/klippy/toolhead.py @@ -238,7 +238,8 @@ def __init__(self, config): # Flush tracking self.flush_timer = self.reactor.register_timer(self._flush_handler) self.do_kick_flush_timer = True - self.last_flush_time = self.need_flush_time = self.step_gen_time = 0. + self.last_flush_time = self.last_sg_flush_time = 0. + self.need_flush_time = self.step_gen_time = 0. 
# Kinematic step generation scan window time tracking self.kin_flush_delay = SDS_CHECK_TIME self.kin_flush_times = [] @@ -286,6 +287,7 @@ def _advance_flush_time(self, flush_time): sg_flush_time = min(flush_time + STEPCOMPRESS_FLUSH_TIME, sg_flush_ceil) for sg in self.step_generators: sg(sg_flush_time) + self.last_sg_flush_time = sg_flush_time # Free trapq entries that are no longer needed free_time = sg_flush_time - self.kin_flush_delay self.trapq_finalize_moves(self.trapq, free_time) @@ -307,7 +309,7 @@ def _advance_move_time(self, next_print_time): def _calc_print_time(self): curtime = self.reactor.monotonic() est_print_time = self.mcu.estimated_print_time(curtime) - kin_time = max(est_print_time + MIN_KIN_TIME, self.last_flush_time) + kin_time = max(est_print_time + MIN_KIN_TIME, self.last_sg_flush_time) kin_time += self.kin_flush_delay min_print_time = max(est_print_time + BUFFER_TIME_START, kin_time) if min_print_time > self.print_time: From 644f7e087284a254d024a475e6880f7d03fc40f2 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Tue, 26 Dec 2023 11:35:27 -0500 Subject: [PATCH 05/63] toolhead: Simplify _advance_flush_time() sg_flush_time calculation Signed-off-by: Kevin O'Connor --- klippy/toolhead.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/klippy/toolhead.py b/klippy/toolhead.py index 6e7b5a946..125ad282c 100644 --- a/klippy/toolhead.py +++ b/klippy/toolhead.py @@ -283,8 +283,9 @@ def __init__(self, config): def _advance_flush_time(self, flush_time): flush_time = max(flush_time, self.last_flush_time) # Generate steps via itersolve - sg_flush_ceil = max(flush_time, self.print_time - self.kin_flush_delay) - sg_flush_time = min(flush_time + STEPCOMPRESS_FLUSH_TIME, sg_flush_ceil) + sg_flush_want = min(flush_time + STEPCOMPRESS_FLUSH_TIME, + self.print_time - self.kin_flush_delay) + sg_flush_time = max(sg_flush_want, flush_time) for sg in self.step_generators: sg(sg_flush_time) self.last_sg_flush_time = sg_flush_time From 978c294741bafac136081f40d066ad2884b1ffce Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sat, 16 Dec 2023 11:15:23 -0500 Subject: [PATCH 06/63] bulk_sensor: New file with helper code for reading bulk sensors Move the ClockSyncRegression class from adxl345.py to a new bulk_sensors.py file. Signed-off-by: Kevin O'Connor --- klippy/extras/adxl345.py | 57 ++++-------------------------------- klippy/extras/bulk_sensor.py | 53 +++++++++++++++++++++++++++++++++ klippy/extras/lis2dw.py | 14 ++++----- klippy/extras/mpu9250.py | 14 ++++----- 4 files changed, 73 insertions(+), 65 deletions(-) create mode 100644 klippy/extras/bulk_sensor.py diff --git a/klippy/extras/adxl345.py b/klippy/extras/adxl345.py index 52698cb6c..36dc80718 100644 --- a/klippy/extras/adxl345.py +++ b/klippy/extras/adxl345.py @@ -4,7 +4,7 @@ # # This file may be distributed under the terms of the GNU GPLv3 license. import logging, time, collections, threading, multiprocessing, os -from . import bus, motion_report +from . import bus, motion_report, bulk_sensor # ADXL345 registers REG_DEVID = 0x00 @@ -173,59 +173,13 @@ def cmd_ACCELEROMETER_DEBUG_WRITE(self, gcmd): val = gcmd.get("VAL", minval=0, maxval=255, parser=lambda x: int(x, 0)) self.chip.set_reg(reg, val) -# Helper class for chip clock synchronization via linear regression -class ClockSyncRegression: - def __init__(self, mcu, chip_clock_smooth, decay = 1. / 20.): - self.mcu = mcu - self.chip_clock_smooth = chip_clock_smooth - self.decay = decay - self.last_chip_clock = self.last_exp_mcu_clock = 0. 
- self.mcu_clock_avg = self.mcu_clock_variance = 0. - self.chip_clock_avg = self.chip_clock_covariance = 0. - def reset(self, mcu_clock, chip_clock): - self.mcu_clock_avg = self.last_mcu_clock = mcu_clock - self.chip_clock_avg = chip_clock - self.mcu_clock_variance = self.chip_clock_covariance = 0. - self.last_chip_clock = self.last_exp_mcu_clock = 0. - def update(self, mcu_clock, chip_clock): - # Update linear regression - decay = self.decay - diff_mcu_clock = mcu_clock - self.mcu_clock_avg - self.mcu_clock_avg += decay * diff_mcu_clock - self.mcu_clock_variance = (1. - decay) * ( - self.mcu_clock_variance + diff_mcu_clock**2 * decay) - diff_chip_clock = chip_clock - self.chip_clock_avg - self.chip_clock_avg += decay * diff_chip_clock - self.chip_clock_covariance = (1. - decay) * ( - self.chip_clock_covariance + diff_mcu_clock*diff_chip_clock*decay) - def set_last_chip_clock(self, chip_clock): - base_mcu, base_chip, inv_cfreq = self.get_clock_translation() - self.last_chip_clock = chip_clock - self.last_exp_mcu_clock = base_mcu + (chip_clock-base_chip) * inv_cfreq - def get_clock_translation(self): - inv_chip_freq = self.mcu_clock_variance / self.chip_clock_covariance - if not self.last_chip_clock: - return self.mcu_clock_avg, self.chip_clock_avg, inv_chip_freq - # Find mcu clock associated with future chip_clock - s_chip_clock = self.last_chip_clock + self.chip_clock_smooth - scdiff = s_chip_clock - self.chip_clock_avg - s_mcu_clock = self.mcu_clock_avg + scdiff * inv_chip_freq - # Calculate frequency to converge at future point - mdiff = s_mcu_clock - self.last_exp_mcu_clock - s_inv_chip_freq = mdiff / self.chip_clock_smooth - return self.last_exp_mcu_clock, self.last_chip_clock, s_inv_chip_freq - def get_time_translation(self): - base_mcu, base_chip, inv_cfreq = self.get_clock_translation() - clock_to_print_time = self.mcu.clock_to_print_time - base_time = clock_to_print_time(base_mcu) - inv_freq = clock_to_print_time(base_mcu + inv_cfreq) - base_time - return base_time, base_chip, inv_freq - MIN_MSG_TIME = 0.100 BYTES_PER_SAMPLE = 5 SAMPLES_PER_BLOCK = 10 +API_UPDATES = 0.100 + # Printer class that controls ADXL345 chip class ADXL345: def __init__(self, config): @@ -259,10 +213,11 @@ def __init__(self, config): # Clock tracking self.last_sequence = self.max_query_duration = 0 self.last_limit_count = self.last_error_count = 0 - self.clock_sync = ClockSyncRegression(self.mcu, 640) + chip_smooth = self.data_rate * API_UPDATES * 2 + self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth) # API server endpoints self.api_dump = motion_report.APIDumpHelper( - self.printer, self._api_update, self._api_startstop, 0.100) + self.printer, self._api_update, self._api_startstop, API_UPDATES) self.name = config.get_name().split()[-1] wh = self.printer.lookup_object('webhooks') wh.register_mux_endpoint("adxl345/dump_adxl345", "sensor", self.name, diff --git a/klippy/extras/bulk_sensor.py b/klippy/extras/bulk_sensor.py new file mode 100644 index 000000000..3d76eb6f1 --- /dev/null +++ b/klippy/extras/bulk_sensor.py @@ -0,0 +1,53 @@ +# Tools for reading bulk sensor data from the mcu +# +# Copyright (C) 2020-2023 Kevin O'Connor +# +# This file may be distributed under the terms of the GNU GPLv3 license. + +# Helper class for chip clock synchronization via linear regression +class ClockSyncRegression: + def __init__(self, mcu, chip_clock_smooth, decay = 1. 
/ 20.): + self.mcu = mcu + self.chip_clock_smooth = chip_clock_smooth + self.decay = decay + self.last_chip_clock = self.last_exp_mcu_clock = 0. + self.mcu_clock_avg = self.mcu_clock_variance = 0. + self.chip_clock_avg = self.chip_clock_covariance = 0. + def reset(self, mcu_clock, chip_clock): + self.mcu_clock_avg = self.last_mcu_clock = mcu_clock + self.chip_clock_avg = chip_clock + self.mcu_clock_variance = self.chip_clock_covariance = 0. + self.last_chip_clock = self.last_exp_mcu_clock = 0. + def update(self, mcu_clock, chip_clock): + # Update linear regression + decay = self.decay + diff_mcu_clock = mcu_clock - self.mcu_clock_avg + self.mcu_clock_avg += decay * diff_mcu_clock + self.mcu_clock_variance = (1. - decay) * ( + self.mcu_clock_variance + diff_mcu_clock**2 * decay) + diff_chip_clock = chip_clock - self.chip_clock_avg + self.chip_clock_avg += decay * diff_chip_clock + self.chip_clock_covariance = (1. - decay) * ( + self.chip_clock_covariance + diff_mcu_clock*diff_chip_clock*decay) + def set_last_chip_clock(self, chip_clock): + base_mcu, base_chip, inv_cfreq = self.get_clock_translation() + self.last_chip_clock = chip_clock + self.last_exp_mcu_clock = base_mcu + (chip_clock-base_chip) * inv_cfreq + def get_clock_translation(self): + inv_chip_freq = self.mcu_clock_variance / self.chip_clock_covariance + if not self.last_chip_clock: + return self.mcu_clock_avg, self.chip_clock_avg, inv_chip_freq + # Find mcu clock associated with future chip_clock + s_chip_clock = self.last_chip_clock + self.chip_clock_smooth + scdiff = s_chip_clock - self.chip_clock_avg + s_mcu_clock = self.mcu_clock_avg + scdiff * inv_chip_freq + # Calculate frequency to converge at future point + mdiff = s_mcu_clock - self.last_exp_mcu_clock + s_inv_chip_freq = mdiff / self.chip_clock_smooth + return self.last_exp_mcu_clock, self.last_chip_clock, s_inv_chip_freq + def get_time_translation(self): + base_mcu, base_chip, inv_cfreq = self.get_clock_translation() + clock_to_print_time = self.mcu.clock_to_print_time + base_time = clock_to_print_time(base_mcu) + inv_freq = clock_to_print_time(base_mcu + inv_cfreq) - base_time + return base_time, base_chip, inv_freq diff --git a/klippy/extras/lis2dw.py b/klippy/extras/lis2dw.py index ae62c22f6..174e071a9 100644 --- a/klippy/extras/lis2dw.py +++ b/klippy/extras/lis2dw.py @@ -4,8 +4,8 @@ # Copyright (C) 2020-2021 Kevin O'Connor # # This file may be distributed under the terms of the GNU GPLv3 license. -import logging, time, collections, threading, multiprocessing, os -from . import bus, motion_report, adxl345 +import logging, time, threading, multiprocessing, os +from . 
import bus, motion_report, adxl345, bulk_sensor # LIS2DW registers REG_LIS2DW_WHO_AM_I_ADDR = 0x0F @@ -30,14 +30,13 @@ FREEFALL_ACCEL = 9.80665 SCALE = FREEFALL_ACCEL * 1.952 / 4 -Accel_Measurement = collections.namedtuple( - 'Accel_Measurement', ('time', 'accel_x', 'accel_y', 'accel_z')) - MIN_MSG_TIME = 0.100 BYTES_PER_SAMPLE = 6 SAMPLES_PER_BLOCK = 8 +API_UPDATES = 0.100 + # Printer class that controls LIS2DW chip class LIS2DW: def __init__(self, config): @@ -69,10 +68,11 @@ def __init__(self, config): # Clock tracking self.last_sequence = self.max_query_duration = 0 self.last_limit_count = self.last_error_count = 0 - self.clock_sync = adxl345.ClockSyncRegression(self.mcu, 640) + chip_smooth = self.data_rate * API_UPDATES * 2 + self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth) # API server endpoints self.api_dump = motion_report.APIDumpHelper( - self.printer, self._api_update, self._api_startstop, 0.100) + self.printer, self._api_update, self._api_startstop, API_UPDATES) self.name = config.get_name().split()[-1] wh = self.printer.lookup_object('webhooks') wh.register_mux_endpoint("lis2dw/dump_lis2dw", "sensor", self.name, diff --git a/klippy/extras/mpu9250.py b/klippy/extras/mpu9250.py index 72c1f6817..204ca8702 100644 --- a/klippy/extras/mpu9250.py +++ b/klippy/extras/mpu9250.py @@ -4,8 +4,8 @@ # Copyright (C) 2020-2021 Kevin O'Connor # # This file may be distributed under the terms of the GNU GPLv3 license. -import logging, time, collections, threading, multiprocessing, os -from . import bus, motion_report, adxl345 +import logging, time, threading, multiprocessing, os +from . import bus, motion_report, adxl345, bulk_sensor MPU9250_ADDR = 0x68 @@ -47,14 +47,13 @@ FIFO_SIZE = 512 -Accel_Measurement = collections.namedtuple( - 'Accel_Measurement', ('time', 'accel_x', 'accel_y', 'accel_z')) - MIN_MSG_TIME = 0.100 BYTES_PER_SAMPLE = 6 SAMPLES_PER_BLOCK = 8 +API_UPDATES = 0.100 + # Printer class that controls MPU9250 chip class MPU9250: def __init__(self, config): @@ -86,10 +85,11 @@ def __init__(self, config): # Clock tracking self.last_sequence = self.max_query_duration = 0 self.last_limit_count = self.last_error_count = 0 - self.clock_sync = adxl345.ClockSyncRegression(self.mcu, 640) + chip_smooth = self.data_rate * API_UPDATES * 2 + self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth) # API server endpoints self.api_dump = motion_report.APIDumpHelper( - self.printer, self._api_update, self._api_startstop, 0.100) + self.printer, self._api_update, self._api_startstop, API_UPDATES) self.name = config.get_name().split()[-1] wh = self.printer.lookup_object('webhooks') wh.register_mux_endpoint("mpu9250/dump_mpu9250", "sensor", self.name, From e67cbbe5c12bae1deb4651e2b3aa12c3c77c3439 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sat, 16 Dec 2023 11:30:51 -0500 Subject: [PATCH 07/63] bulk_sensor: Add new BulkDataQueue class Move the bulk sample queue collection to a new helper class in bulk_sensor.py. 
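For reference, the queue being factored out is deliberately small: the mcu response callback appends raw message dicts under a lock, while the consumer swaps the whole list out in one step, so the producer side never waits on sample processing and clear_samples() is just a discarded pull. The standalone sketch below (the MiniBulkQueue and producer names are illustrative only, not part of the patch) mirrors that pull/clear behaviour outside of Klipper:

import threading

class MiniBulkQueue:
    # Mirrors BulkDataQueue: the response callback appends, the consumer
    # swaps the whole list out under the same lock
    def __init__(self):
        self.lock = threading.Lock()
        self.raw_samples = []
    def handle_data(self, params):
        # Producer side (the registered mcu response callback in the patch)
        with self.lock:
            self.raw_samples.append(params)
    def pull_samples(self):
        # Consumer side: atomically take everything queued so far
        with self.lock:
            raw_samples = self.raw_samples
            self.raw_samples = []
        return raw_samples
    def clear_samples(self):
        self.pull_samples()

def producer(q):
    for i in range(100):
        q.handle_data({'sequence': i})

q = MiniBulkQueue()
t = threading.Thread(target=producer, args=(q,))
t.start()
t.join()
print(len(q.pull_samples()))  # 100
print(len(q.pull_samples()))  # 0 - the first pull emptied the queue

In the drivers this replaces the per-chip _handle_*_data() callbacks and the open-coded lock/list pairs.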
Signed-off-by: Kevin O'Connor --- klippy/extras/adxl345.py | 21 +++++---------------- klippy/extras/angle.py | 23 ++++++----------------- klippy/extras/bulk_sensor.py | 20 ++++++++++++++++++++ klippy/extras/lis2dw.py | 21 +++++---------------- klippy/extras/mpu9250.py | 21 +++++---------------- 5 files changed, 41 insertions(+), 65 deletions(-) diff --git a/klippy/extras/adxl345.py b/klippy/extras/adxl345.py index 36dc80718..1cac6143a 100644 --- a/klippy/extras/adxl345.py +++ b/klippy/extras/adxl345.py @@ -3,7 +3,7 @@ # Copyright (C) 2020-2021 Kevin O'Connor # # This file may be distributed under the terms of the GNU GPLv3 license. -import logging, time, collections, threading, multiprocessing, os +import logging, time, collections, multiprocessing, os from . import bus, motion_report, bulk_sensor # ADXL345 registers @@ -195,9 +195,6 @@ def __init__(self, config): self.data_rate = config.getint('rate', 3200) if self.data_rate not in QUERY_RATES: raise config.error("Invalid rate parameter: %d" % (self.data_rate,)) - # Measurement storage (accessed from background thread) - self.lock = threading.Lock() - self.raw_samples = [] # Setup mcu sensor_adxl345 bulk query code self.spi = bus.MCU_SPI_from_config(config, 3, default_speed=5000000) self.mcu = mcu = self.spi.get_mcu() @@ -209,7 +206,7 @@ def __init__(self, config): mcu.add_config_cmd("query_adxl345 oid=%d clock=0 rest_ticks=0" % (oid,), on_restart=True) mcu.register_config_callback(self._build_config) - mcu.register_response(self._handle_adxl345_data, "adxl345_data", oid) + self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "adxl345_data", oid) # Clock tracking self.last_sequence = self.max_query_duration = 0 self.last_limit_count = self.last_error_count = 0 @@ -250,9 +247,6 @@ def set_reg(self, reg, val, minclock=0): # Measurement collection def is_measuring(self): return self.query_rate > 0 - def _handle_adxl345_data(self, params): - with self.lock: - self.raw_samples.append(params) def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below (x_pos, x_scale), (y_pos, y_scale), (z_pos, z_scale) = self.axes_map @@ -335,10 +329,8 @@ def _start_measurements(self): self.set_reg(REG_FIFO_CTL, 0x00) self.set_reg(REG_BW_RATE, QUERY_RATES[self.data_rate]) self.set_reg(REG_FIFO_CTL, SET_FIFO_CTL) - # Setup samples - with self.lock: - self.raw_samples = [] # Start bulk reading + self.bulk_queue.clear_samples() systime = self.printer.get_reactor().monotonic() print_time = self.mcu.estimated_print_time(systime) + MIN_MSG_TIME reqclock = self.mcu.print_time_to_clock(print_time) @@ -360,15 +352,12 @@ def _finish_measurements(self): # Halt bulk reading params = self.query_adxl345_end_cmd.send([self.oid, 0, 0]) self.query_rate = 0 - with self.lock: - self.raw_samples = [] + self.bulk_queue.clear_samples() logging.info("ADXL345 finished '%s' measurements", self.name) # API interface def _api_update(self, eventtime): self._update_clock() - with self.lock: - raw_samples = self.raw_samples - self.raw_samples = [] + raw_samples = self.bulk_queue.pull_samples() if not raw_samples: return {} samples = self._extract_samples(raw_samples) diff --git a/klippy/extras/angle.py b/klippy/extras/angle.py index 30a4447ad..26b9c6f0b 100644 --- a/klippy/extras/angle.py +++ b/klippy/extras/angle.py @@ -3,8 +3,8 @@ # Copyright (C) 2021,2022 Kevin O'Connor # # This file may be distributed under the terms of the GNU GPLv3 license. -import logging, math, threading -from . import bus, motion_report +import logging, math +from . 
import bus, motion_report, bulk_sensor MIN_MSG_TIME = 0.100 TCODE_ERROR = 0xff @@ -417,9 +417,6 @@ def __init__(self, config): # Measurement conversion self.start_clock = self.time_shift = self.sample_ticks = 0 self.last_sequence = self.last_angle = 0 - # Measurement storage (accessed from background thread) - self.lock = threading.Lock() - self.raw_samples = [] # Sensor type sensors = { "a1333": HelperA1333, "as5047d": HelperAS5047D, "tle5012b": HelperTLE5012B } @@ -439,8 +436,7 @@ def __init__(self, config): "query_spi_angle oid=%d clock=0 rest_ticks=0 time_shift=0" % (oid,), on_restart=True) mcu.register_config_callback(self._build_config) - mcu.register_response(self._handle_spi_angle_data, - "spi_angle_data", oid) + self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "spi_angle_data", oid) # API server endpoints self.api_dump = motion_report.APIDumpHelper( self.printer, self._api_update, self._api_startstop, 0.100) @@ -464,9 +460,6 @@ def get_status(self, eventtime=None): # Measurement collection def is_measuring(self): return self.start_clock != 0 - def _handle_spi_angle_data(self, params): - with self.lock: - self.raw_samples.append(params) def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below sample_ticks = self.sample_ticks @@ -524,9 +517,7 @@ def _extract_samples(self, raw_samples): def _api_update(self, eventtime): if self.sensor_helper.is_tcode_absolute: self.sensor_helper.update_clock() - with self.lock: - raw_samples = self.raw_samples - self.raw_samples = [] + raw_samples = self.bulk_queue.pull_samples() if not raw_samples: return {} samples, error_count = self._extract_samples(raw_samples) @@ -541,8 +532,7 @@ def _start_measurements(self): logging.info("Starting angle '%s' measurements", self.name) self.sensor_helper.start() # Start bulk reading - with self.lock: - self.raw_samples = [] + self.bulk_queue.clear_samples() self.last_sequence = 0 systime = self.printer.get_reactor().monotonic() print_time = self.mcu.estimated_print_time(systime) + MIN_MSG_TIME @@ -557,8 +547,7 @@ def _finish_measurements(self): # Halt bulk reading params = self.query_spi_angle_end_cmd.send([self.oid, 0, 0, 0]) self.start_clock = 0 - with self.lock: - self.raw_samples = [] + self.bulk_queue.clear_samples() self.sensor_helper.last_temperature = None logging.info("Stopped angle '%s' measurements", self.name) def _api_startstop(self, is_start): diff --git a/klippy/extras/bulk_sensor.py b/klippy/extras/bulk_sensor.py index 3d76eb6f1..15749051e 100644 --- a/klippy/extras/bulk_sensor.py +++ b/klippy/extras/bulk_sensor.py @@ -3,6 +3,26 @@ # Copyright (C) 2020-2023 Kevin O'Connor # # This file may be distributed under the terms of the GNU GPLv3 license. 
+import threading + +# Helper class to store incoming messages in a queue +class BulkDataQueue: + def __init__(self, mcu, msg_name, oid): + # Measurement storage (accessed from background thread) + self.lock = threading.Lock() + self.raw_samples = [] + # Register callback with mcu + mcu.register_response(self._handle_data, msg_name, oid) + def _handle_data(self, params): + with self.lock: + self.raw_samples.append(params) + def pull_samples(self): + with self.lock: + raw_samples = self.raw_samples + self.raw_samples = [] + return raw_samples + def clear_samples(self): + self.pull_samples() # Helper class for chip clock synchronization via linear regression class ClockSyncRegression: diff --git a/klippy/extras/lis2dw.py b/klippy/extras/lis2dw.py index 174e071a9..af9faba58 100644 --- a/klippy/extras/lis2dw.py +++ b/klippy/extras/lis2dw.py @@ -4,7 +4,7 @@ # Copyright (C) 2020-2021 Kevin O'Connor # # This file may be distributed under the terms of the GNU GPLv3 license. -import logging, time, threading, multiprocessing, os +import logging from . import bus, motion_report, adxl345, bulk_sensor # LIS2DW registers @@ -50,9 +50,6 @@ def __init__(self, config): raise config.error("Invalid lis2dw axes_map parameter") self.axes_map = [am[a.strip()] for a in axes_map] self.data_rate = 1600 - # Measurement storage (accessed from background thread) - self.lock = threading.Lock() - self.raw_samples = [] # Setup mcu sensor_lis2dw bulk query code self.spi = bus.MCU_SPI_from_config(config, 3, default_speed=5000000) self.mcu = mcu = self.spi.get_mcu() @@ -64,7 +61,7 @@ def __init__(self, config): mcu.add_config_cmd("query_lis2dw oid=%d clock=0 rest_ticks=0" % (oid,), on_restart=True) mcu.register_config_callback(self._build_config) - mcu.register_response(self._handle_lis2dw_data, "lis2dw_data", oid) + self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "lis2dw_data", oid) # Clock tracking self.last_sequence = self.max_query_duration = 0 self.last_limit_count = self.last_error_count = 0 @@ -106,9 +103,6 @@ def set_reg(self, reg, val, minclock=0): # Measurement collection def is_measuring(self): return self.query_rate > 0 - def _handle_lis2dw_data(self, params): - with self.lock: - self.raw_samples.append(params) def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below (x_pos, x_scale), (y_pos, y_scale), (z_pos, z_scale) = self.axes_map @@ -198,10 +192,8 @@ def _start_measurements(self): # High-Performance Mode (14-bit resolution) self.set_reg(REG_LIS2DW_CTRL_REG1_ADDR, 0x94) - # Setup samples - with self.lock: - self.raw_samples = [] # Start bulk reading + self.bulk_queue.clear_samples() systime = self.printer.get_reactor().monotonic() print_time = self.mcu.estimated_print_time(systime) + MIN_MSG_TIME reqclock = self.mcu.print_time_to_clock(print_time) @@ -223,16 +215,13 @@ def _finish_measurements(self): # Halt bulk reading params = self.query_lis2dw_end_cmd.send([self.oid, 0, 0]) self.query_rate = 0 - with self.lock: - self.raw_samples = [] + self.bulk_queue.clear_samples() logging.info("LIS2DW finished '%s' measurements", self.name) self.set_reg(REG_LIS2DW_FIFO_CTRL, 0x00) # API interface def _api_update(self, eventtime): self._update_clock() - with self.lock: - raw_samples = self.raw_samples - self.raw_samples = [] + raw_samples = self.bulk_queue.pull_samples() if not raw_samples: return {} samples = self._extract_samples(raw_samples) diff --git a/klippy/extras/mpu9250.py b/klippy/extras/mpu9250.py index 204ca8702..cb5e7b28a 100644 --- a/klippy/extras/mpu9250.py +++ 
b/klippy/extras/mpu9250.py @@ -4,7 +4,7 @@ # Copyright (C) 2020-2021 Kevin O'Connor # # This file may be distributed under the terms of the GNU GPLv3 license. -import logging, time, threading, multiprocessing, os +import logging, time from . import bus, motion_report, adxl345, bulk_sensor MPU9250_ADDR = 0x68 @@ -69,9 +69,6 @@ def __init__(self, config): self.data_rate = config.getint('rate', 4000) if self.data_rate not in SAMPLE_RATE_DIVS: raise config.error("Invalid rate parameter: %d" % (self.data_rate,)) - # Measurement storage (accessed from background thread) - self.lock = threading.Lock() - self.raw_samples = [] # Setup mcu sensor_mpu9250 bulk query code self.i2c = bus.MCU_I2C_from_config(config, default_addr=MPU9250_ADDR, @@ -81,7 +78,7 @@ def __init__(self, config): self.query_mpu9250_cmd = self.query_mpu9250_end_cmd = None self.query_mpu9250_status_cmd = None mcu.register_config_callback(self._build_config) - mcu.register_response(self._handle_mpu9250_data, "mpu9250_data", oid) + self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "mpu9250_data", oid) # Clock tracking self.last_sequence = self.max_query_duration = 0 self.last_limit_count = self.last_error_count = 0 @@ -120,9 +117,6 @@ def set_reg(self, reg, val, minclock=0): # Measurement collection def is_measuring(self): return self.query_rate > 0 - def _handle_mpu9250_data(self, params): - with self.lock: - self.raw_samples.append(params) def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below (x_pos, x_scale), (y_pos, y_scale), (z_pos, z_scale) = self.axes_map @@ -210,10 +204,8 @@ def _start_measurements(self): self.set_reg(REG_ACCEL_CONFIG, SET_ACCEL_CONFIG) self.set_reg(REG_ACCEL_CONFIG2, SET_ACCEL_CONFIG2) - # Setup samples - with self.lock: - self.raw_samples = [] # Start bulk reading + self.bulk_queue.clear_samples() systime = self.printer.get_reactor().monotonic() print_time = self.mcu.estimated_print_time(systime) + MIN_MSG_TIME reqclock = self.mcu.print_time_to_clock(print_time) @@ -235,8 +227,7 @@ def _finish_measurements(self): # Halt bulk reading params = self.query_mpu9250_end_cmd.send([self.oid, 0, 0]) self.query_rate = 0 - with self.lock: - self.raw_samples = [] + self.bulk_queue.clear_samples() logging.info("MPU9250 finished '%s' measurements", self.name) self.set_reg(REG_PWR_MGMT_1, SET_PWR_MGMT_1_SLEEP) self.set_reg(REG_PWR_MGMT_2, SET_PWR_MGMT_2_OFF) @@ -244,9 +235,7 @@ def _finish_measurements(self): # API interface def _api_update(self, eventtime): self._update_clock() - with self.lock: - raw_samples = self.raw_samples - self.raw_samples = [] + raw_samples = self.bulk_queue.pull_samples() if not raw_samples: return {} samples = self._extract_samples(raw_samples) From d6a4669ce092496bb675a28732aa5f6a11360caf Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sat, 16 Dec 2023 13:01:34 -0500 Subject: [PATCH 08/63] bulk_sensor: Add new ChipClockUpdater helper class All the accelerometers use a standard response for their query_status messages. Create a common helper class to process those responses. 
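The bookkeeping being centralized is the same in every driver: the 16-bit next_sequence and limit_count counters from the status response are advanced modulo 0x10000, and the sample count implied by the response (completed data blocks, bytes still buffered on the mcu, and entries still in the sensor fifo) becomes a "chip clock" for the clock regression. A standalone sketch with made-up numbers (estimate_chip_clock and the example params are illustrative only):

MAX_BULK_MSG_SIZE = 52  # bytes of sample payload in one *_data message

def estimate_chip_clock(params, last_sequence, bytes_per_sample):
    # Standalone mirror of the arithmetic in ChipClockUpdater.update_clock()
    samples_per_block = MAX_BULK_MSG_SIZE // bytes_per_sample
    # The mcu sequence counter is 16 bits wide - advance it modulo 0x10000
    seq_diff = (params['next_sequence'] - last_sequence) & 0xffff
    last_sequence += seq_diff
    # Count samples already sent, still buffered on the mcu, and still
    # sitting in the sensor fifo
    msg_count = (last_sequence * samples_per_block
                 + params['buffered'] // bytes_per_sample + params['fifo'])
    # +.5 for average query response inaccuracy, +.5 for assumed offset of
    # chip hardware processing time
    return last_sequence, msg_count + 1

# Example with adxl345 framing (5 bytes per sample, 10 samples per block)
params = {'next_sequence': 3, 'buffered': 15, 'fifo': 2}
seq, chip_clock = estimate_chip_clock(params, 0, 5)
print(seq, chip_clock)  # 3 36  (3*10 blocks + 15//5 buffered + 2 fifo + 1)

The real ChipClockUpdater additionally tracks limit_count, discards responses whose query_ticks duration is unusually high (a slow query would skew the clock estimate), and feeds the result into ClockSyncRegression together with mcu_clock + query_ticks // 2.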
Signed-off-by: Kevin O'Connor --- klippy/extras/adxl345.py | 40 ++++++++--------------------- klippy/extras/bulk_sensor.py | 48 +++++++++++++++++++++++++++++++++++ klippy/extras/lis2dw.py | 49 ++++++++---------------------------- klippy/extras/mpu9250.py | 49 ++++++++---------------------------- 4 files changed, 80 insertions(+), 106 deletions(-) diff --git a/klippy/extras/adxl345.py b/klippy/extras/adxl345.py index 1cac6143a..bc2dfeb46 100644 --- a/klippy/extras/adxl345.py +++ b/klippy/extras/adxl345.py @@ -1,6 +1,6 @@ # Support for reading acceleration data from an adxl345 chip # -# Copyright (C) 2020-2021 Kevin O'Connor +# Copyright (C) 2020-2023 Kevin O'Connor # # This file may be distributed under the terms of the GNU GPLv3 license. import logging, time, collections, multiprocessing, os @@ -208,10 +208,11 @@ def __init__(self, config): mcu.register_config_callback(self._build_config) self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "adxl345_data", oid) # Clock tracking - self.last_sequence = self.max_query_duration = 0 - self.last_limit_count = self.last_error_count = 0 chip_smooth = self.data_rate * API_UPDATES * 2 self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth) + self.clock_updater = bulk_sensor.ChipClockUpdater(self.clock_sync, + BYTES_PER_SAMPLE) + self.last_error_count = 0 # API server endpoints self.api_dump = motion_report.APIDumpHelper( self.printer, self._api_update, self._api_startstop, API_UPDATES) @@ -250,7 +251,7 @@ def is_measuring(self): def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below (x_pos, x_scale), (y_pos, y_scale), (z_pos, z_scale) = self.axes_map - last_sequence = self.last_sequence + last_sequence = self.clock_updater.get_last_sequence() time_base, chip_base, inv_freq = self.clock_sync.get_time_translation() # Process every message in raw_samples count = seq = 0 @@ -291,26 +292,7 @@ def _update_clock(self, minclock=0): break else: raise self.printer.command_error("Unable to query adxl345 fifo") - mcu_clock = self.mcu.clock32_to_clock64(params['clock']) - seq_diff = (params['next_sequence'] - self.last_sequence) & 0xffff - self.last_sequence += seq_diff - buffered = params['buffered'] - lc_diff = (params['limit_count'] - self.last_limit_count) & 0xffff - self.last_limit_count += lc_diff - duration = params['query_ticks'] - if duration > self.max_query_duration: - # Skip measurement as a high query time could skew clock tracking - self.max_query_duration = max(2 * self.max_query_duration, - self.mcu.seconds_to_clock(.000005)) - return - self.max_query_duration = 2 * duration - msg_count = (self.last_sequence * SAMPLES_PER_BLOCK - + buffered // BYTES_PER_SAMPLE + fifo) - # The "chip clock" is the message counter plus .5 for average - # inaccuracy of query responses and plus .5 for assumed offset - # of adxl345 hw processing time. 
- chip_clock = msg_count + 1 - self.clock_sync.update(mcu_clock + duration // 2, chip_clock) + self.clock_updater.update_clock(params) def _start_measurements(self): if self.is_measuring(): return @@ -340,12 +322,10 @@ def _start_measurements(self): reqclock=reqclock) logging.info("ADXL345 starting '%s' measurements", self.name) # Initialize clock tracking - self.last_sequence = 0 - self.last_limit_count = self.last_error_count = 0 - self.clock_sync.reset(reqclock, 0) - self.max_query_duration = 1 << 31 + self.clock_updater.note_start(reqclock) self._update_clock(minclock=reqclock) - self.max_query_duration = 1 << 31 + self.clock_updater.clear_duration_filter() + self.last_error_count = 0 def _finish_measurements(self): if not self.is_measuring(): return @@ -364,7 +344,7 @@ def _api_update(self, eventtime): if not samples: return {} return {'data': samples, 'errors': self.last_error_count, - 'overflows': self.last_limit_count} + 'overflows': self.clock_updater.get_last_limit_count()} def _api_startstop(self, is_start): if is_start: self._start_measurements() diff --git a/klippy/extras/bulk_sensor.py b/klippy/extras/bulk_sensor.py index 15749051e..28aed48ab 100644 --- a/klippy/extras/bulk_sensor.py +++ b/klippy/extras/bulk_sensor.py @@ -71,3 +71,51 @@ def get_time_translation(self): base_time = clock_to_print_time(base_mcu) inv_freq = clock_to_print_time(base_mcu + inv_cfreq) - base_time return base_time, base_chip, inv_freq + +MAX_BULK_MSG_SIZE = 52 + +# Handle common periodic chip status query responses +class ChipClockUpdater: + def __init__(self, clock_sync, bytes_per_sample): + self.clock_sync = clock_sync + self.bytes_per_sample = bytes_per_sample + self.samples_per_block = MAX_BULK_MSG_SIZE // bytes_per_sample + self.mcu = clock_sync.mcu + self.last_sequence = self.max_query_duration = 0 + self.last_limit_count = 0 + def get_last_sequence(self): + return self.last_sequence + def get_last_limit_count(self): + return self.last_limit_count + def clear_duration_filter(self): + self.max_query_duration = 1 << 31 + def note_start(self, reqclock): + self.last_sequence = 0 + self.last_limit_count = 0 + self.clock_sync.reset(reqclock, 0) + self.clear_duration_filter() + def update_clock(self, params): + # Handle a status response message of the form: + # adxl345_status oid=x clock=x query_ticks=x next_sequence=x + # buffered=x fifo=x limit_count=x + fifo = params['fifo'] + mcu_clock = self.mcu.clock32_to_clock64(params['clock']) + seq_diff = (params['next_sequence'] - self.last_sequence) & 0xffff + self.last_sequence += seq_diff + buffered = params['buffered'] + lc_diff = (params['limit_count'] - self.last_limit_count) & 0xffff + self.last_limit_count += lc_diff + duration = params['query_ticks'] + if duration > self.max_query_duration: + # Skip measurement as a high query time could skew clock tracking + self.max_query_duration = max(2 * self.max_query_duration, + self.mcu.seconds_to_clock(.000005)) + return + self.max_query_duration = 2 * duration + msg_count = (self.last_sequence * self.samples_per_block + + buffered // self.bytes_per_sample + fifo) + # The "chip clock" is the message counter plus .5 for average + # inaccuracy of query responses and plus .5 for assumed offset + # of hardware processing time. 
+ chip_clock = msg_count + 1 + self.clock_sync.update(mcu_clock + duration // 2, chip_clock) diff --git a/klippy/extras/lis2dw.py b/klippy/extras/lis2dw.py index af9faba58..a1e89123b 100644 --- a/klippy/extras/lis2dw.py +++ b/klippy/extras/lis2dw.py @@ -63,10 +63,11 @@ def __init__(self, config): mcu.register_config_callback(self._build_config) self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "lis2dw_data", oid) # Clock tracking - self.last_sequence = self.max_query_duration = 0 - self.last_limit_count = self.last_error_count = 0 chip_smooth = self.data_rate * API_UPDATES * 2 self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth) + self.clock_updater = bulk_sensor.ChipClockUpdater(self.clock_sync, + BYTES_PER_SAMPLE) + self.last_error_count = 0 # API server endpoints self.api_dump = motion_report.APIDumpHelper( self.printer, self._api_update, self._api_startstop, API_UPDATES) @@ -106,7 +107,7 @@ def is_measuring(self): def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below (x_pos, x_scale), (y_pos, y_scale), (z_pos, z_scale) = self.axes_map - last_sequence = self.last_sequence + last_sequence = self.clock_updater.get_last_sequence() time_base, chip_base, inv_freq = self.clock_sync.get_time_translation() # Process every message in raw_samples count = seq = 0 @@ -140,35 +141,9 @@ def _extract_samples(self, raw_samples): del samples[count:] return samples def _update_clock(self, minclock=0): - # Query current state - for retry in range(5): - params = self.query_lis2dw_status_cmd.send([self.oid], - minclock=minclock) - fifo = params['fifo'] & 0x1f - if fifo <= 32: - break - else: - raise self.printer.command_error("Unable to query lis2dw fifo") - mcu_clock = self.mcu.clock32_to_clock64(params['clock']) - seq_diff = (params['next_sequence'] - self.last_sequence) & 0xffff - self.last_sequence += seq_diff - buffered = params['buffered'] - lc_diff = (params['limit_count'] - self.last_limit_count) & 0xffff - self.last_limit_count += lc_diff - duration = params['query_ticks'] - if duration > self.max_query_duration: - # Skip measurement as a high query time could skew clock tracking - self.max_query_duration = max(2 * self.max_query_duration, - self.mcu.seconds_to_clock(.000005)) - return - self.max_query_duration = 2 * duration - msg_count = (self.last_sequence * SAMPLES_PER_BLOCK - + buffered // BYTES_PER_SAMPLE + fifo) - # The "chip clock" is the message counter plus .5 for average - # inaccuracy of query responses and plus .5 for assumed offset - # of lis2dw hw processing time. 
- chip_clock = msg_count + 1 - self.clock_sync.update(mcu_clock + duration // 2, chip_clock) + params = self.query_lis2dw_status_cmd.send([self.oid], + minclock=minclock) + self.clock_updater.update_clock(params) def _start_measurements(self): if self.is_measuring(): return @@ -203,12 +178,10 @@ def _start_measurements(self): reqclock=reqclock) logging.info("LIS2DW starting '%s' measurements", self.name) # Initialize clock tracking - self.last_sequence = 0 - self.last_limit_count = self.last_error_count = 0 - self.clock_sync.reset(reqclock, 0) - self.max_query_duration = 1 << 31 + self.clock_updater.note_start(reqclock) self._update_clock(minclock=reqclock) - self.max_query_duration = 1 << 31 + self.clock_updater.clear_duration_filter() + self.last_error_count = 0 def _finish_measurements(self): if not self.is_measuring(): return @@ -228,7 +201,7 @@ def _api_update(self, eventtime): if not samples: return {} return {'data': samples, 'errors': self.last_error_count, - 'overflows': self.last_limit_count} + 'overflows': self.clock_updater.get_last_limit_count()} def _api_startstop(self, is_start): if is_start: self._start_measurements() diff --git a/klippy/extras/mpu9250.py b/klippy/extras/mpu9250.py index cb5e7b28a..6ca300cdf 100644 --- a/klippy/extras/mpu9250.py +++ b/klippy/extras/mpu9250.py @@ -80,10 +80,11 @@ def __init__(self, config): mcu.register_config_callback(self._build_config) self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "mpu9250_data", oid) # Clock tracking - self.last_sequence = self.max_query_duration = 0 - self.last_limit_count = self.last_error_count = 0 chip_smooth = self.data_rate * API_UPDATES * 2 self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth) + self.clock_updater = bulk_sensor.ChipClockUpdater(self.clock_sync, + BYTES_PER_SAMPLE) + self.last_error_count = 0 # API server endpoints self.api_dump = motion_report.APIDumpHelper( self.printer, self._api_update, self._api_startstop, API_UPDATES) @@ -120,7 +121,7 @@ def is_measuring(self): def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below (x_pos, x_scale), (y_pos, y_scale), (z_pos, z_scale) = self.axes_map - last_sequence = self.last_sequence + last_sequence = self.clock_updater.get_last_sequence() time_base, chip_base, inv_freq = self.clock_sync.get_time_translation() # Process every message in raw_samples count = seq = 0 @@ -152,35 +153,9 @@ def _extract_samples(self, raw_samples): return samples def _update_clock(self, minclock=0): - # Query current state - for retry in range(5): - params = self.query_mpu9250_status_cmd.send([self.oid], - minclock=minclock) - fifo = params['fifo'] & 0x1fff - if fifo <= FIFO_SIZE: - break - else: - raise self.printer.command_error("Unable to query mpu9250 fifo") - mcu_clock = self.mcu.clock32_to_clock64(params['clock']) - seq_diff = (params['next_sequence'] - self.last_sequence) & 0xffff - self.last_sequence += seq_diff - buffered = params['buffered'] - lc_diff = (params['limit_count'] - self.last_limit_count) & 0xffff - self.last_limit_count += lc_diff - duration = params['query_ticks'] - if duration > self.max_query_duration: - # Skip measurement as a high query time could skew clock tracking - self.max_query_duration = max(2 * self.max_query_duration, - self.mcu.seconds_to_clock(.000005)) - return - self.max_query_duration = 2 * duration - msg_count = (self.last_sequence * SAMPLES_PER_BLOCK - + buffered // BYTES_PER_SAMPLE + fifo) - # The "chip clock" is the message counter plus .5 for average - # inaccuracy of query responses 
and plus .5 for assumed offset - # of mpu9250 hw processing time. - chip_clock = msg_count + 1 - self.clock_sync.update(mcu_clock + duration // 2, chip_clock) + params = self.query_mpu9250_status_cmd.send([self.oid], + minclock=minclock) + self.clock_updater.update_clock(params) def _start_measurements(self): if self.is_measuring(): return @@ -215,12 +190,10 @@ def _start_measurements(self): reqclock=reqclock) logging.info("MPU9250 starting '%s' measurements", self.name) # Initialize clock tracking - self.last_sequence = 0 - self.last_limit_count = self.last_error_count = 0 - self.clock_sync.reset(reqclock, 0) - self.max_query_duration = 1 << 31 + self.clock_updater.note_start(reqclock) self._update_clock(minclock=reqclock) - self.max_query_duration = 1 << 31 + self.clock_updater.clear_duration_filter() + self.last_error_count = 0 def _finish_measurements(self): if not self.is_measuring(): return @@ -242,7 +215,7 @@ def _api_update(self, eventtime): if not samples: return {} return {'data': samples, 'errors': self.last_error_count, - 'overflows': self.last_limit_count} + 'overflows': self.clock_updater.get_last_limit_count()} def _api_startstop(self, is_start): if is_start: self._start_measurements() From 43ce7c0b9ad4f30277c10b086b86a0937dbfebbc Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sat, 16 Dec 2023 22:00:23 -0500 Subject: [PATCH 09/63] adxl345: No need to implement is_measuring() The APIDumpHelper class already ensures that the start/stop callbacks will only be called when needed. Signed-off-by: Kevin O'Connor --- klippy/extras/adxl345.py | 9 --------- klippy/extras/angle.py | 7 ------- klippy/extras/lis2dw.py | 9 --------- klippy/extras/mpu9250.py | 9 --------- 4 files changed, 34 deletions(-) diff --git a/klippy/extras/adxl345.py b/klippy/extras/adxl345.py index bc2dfeb46..c15dca4be 100644 --- a/klippy/extras/adxl345.py +++ b/klippy/extras/adxl345.py @@ -185,7 +185,6 @@ class ADXL345: def __init__(self, config): self.printer = config.get_printer() AccelCommandHelper(config, self) - self.query_rate = 0 am = {'x': (0, SCALE_XY), 'y': (1, SCALE_XY), 'z': (2, SCALE_Z), '-x': (0, -SCALE_XY), '-y': (1, -SCALE_XY), '-z': (2, -SCALE_Z)} axes_map = config.getlist('axes_map', ('x','y','z'), count=3) @@ -246,8 +245,6 @@ def set_reg(self, reg, val, minclock=0): "(e.g. faulty wiring) or a faulty adxl345 chip." % ( reg, val, stored_val)) # Measurement collection - def is_measuring(self): - return self.query_rate > 0 def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below (x_pos, x_scale), (y_pos, y_scale), (z_pos, z_scale) = self.axes_map @@ -294,8 +291,6 @@ def _update_clock(self, minclock=0): raise self.printer.command_error("Unable to query adxl345 fifo") self.clock_updater.update_clock(params) def _start_measurements(self): - if self.is_measuring(): - return # In case of miswiring, testing ADXL345 device ID prevents treating # noise or wrong signal as a correctly initialized device dev_id = self.read_reg(REG_DEVID) @@ -317,7 +312,6 @@ def _start_measurements(self): print_time = self.mcu.estimated_print_time(systime) + MIN_MSG_TIME reqclock = self.mcu.print_time_to_clock(print_time) rest_ticks = self.mcu.seconds_to_clock(4. 
/ self.data_rate) - self.query_rate = self.data_rate self.query_adxl345_cmd.send([self.oid, reqclock, rest_ticks], reqclock=reqclock) logging.info("ADXL345 starting '%s' measurements", self.name) @@ -327,11 +321,8 @@ def _start_measurements(self): self.clock_updater.clear_duration_filter() self.last_error_count = 0 def _finish_measurements(self): - if not self.is_measuring(): - return # Halt bulk reading params = self.query_adxl345_end_cmd.send([self.oid, 0, 0]) - self.query_rate = 0 self.bulk_queue.clear_samples() logging.info("ADXL345 finished '%s' measurements", self.name) # API interface diff --git a/klippy/extras/angle.py b/klippy/extras/angle.py index 26b9c6f0b..5bfbb6718 100644 --- a/klippy/extras/angle.py +++ b/klippy/extras/angle.py @@ -458,8 +458,6 @@ def _build_config(self): def get_status(self, eventtime=None): return {'temperature': self.sensor_helper.last_temperature} # Measurement collection - def is_measuring(self): - return self.start_clock != 0 def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below sample_ticks = self.sample_ticks @@ -527,8 +525,6 @@ def _api_update(self, eventtime): return {'data': samples, 'errors': error_count, 'position_offset': offset} def _start_measurements(self): - if self.is_measuring(): - return logging.info("Starting angle '%s' measurements", self.name) self.sensor_helper.start() # Start bulk reading @@ -542,11 +538,8 @@ def _start_measurements(self): self.query_spi_angle_cmd.send([self.oid, reqclock, rest_ticks, self.time_shift], reqclock=reqclock) def _finish_measurements(self): - if not self.is_measuring(): - return # Halt bulk reading params = self.query_spi_angle_end_cmd.send([self.oid, 0, 0, 0]) - self.start_clock = 0 self.bulk_queue.clear_samples() self.sensor_helper.last_temperature = None logging.info("Stopped angle '%s' measurements", self.name) diff --git a/klippy/extras/lis2dw.py b/klippy/extras/lis2dw.py index a1e89123b..7eaf38d70 100644 --- a/klippy/extras/lis2dw.py +++ b/klippy/extras/lis2dw.py @@ -42,7 +42,6 @@ class LIS2DW: def __init__(self, config): self.printer = config.get_printer() adxl345.AccelCommandHelper(config, self) - self.query_rate = 0 am = {'x': (0, SCALE), 'y': (1, SCALE), 'z': (2, SCALE), '-x': (0, -SCALE), '-y': (1, -SCALE), '-z': (2, -SCALE)} axes_map = config.getlist('axes_map', ('x','y','z'), count=3) @@ -102,8 +101,6 @@ def set_reg(self, reg, val, minclock=0): "(e.g. faulty wiring) or a faulty lis2dw chip." % ( reg, val, stored_val)) # Measurement collection - def is_measuring(self): - return self.query_rate > 0 def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below (x_pos, x_scale), (y_pos, y_scale), (z_pos, z_scale) = self.axes_map @@ -145,8 +142,6 @@ def _update_clock(self, minclock=0): minclock=minclock) self.clock_updater.update_clock(params) def _start_measurements(self): - if self.is_measuring(): - return # In case of miswiring, testing LIS2DW device ID prevents treating # noise or wrong signal as a correctly initialized device dev_id = self.read_reg(REG_LIS2DW_WHO_AM_I_ADDR) @@ -173,7 +168,6 @@ def _start_measurements(self): print_time = self.mcu.estimated_print_time(systime) + MIN_MSG_TIME reqclock = self.mcu.print_time_to_clock(print_time) rest_ticks = self.mcu.seconds_to_clock(4. 
/ self.data_rate) - self.query_rate = self.data_rate self.query_lis2dw_cmd.send([self.oid, reqclock, rest_ticks], reqclock=reqclock) logging.info("LIS2DW starting '%s' measurements", self.name) @@ -183,11 +177,8 @@ def _start_measurements(self): self.clock_updater.clear_duration_filter() self.last_error_count = 0 def _finish_measurements(self): - if not self.is_measuring(): - return # Halt bulk reading params = self.query_lis2dw_end_cmd.send([self.oid, 0, 0]) - self.query_rate = 0 self.bulk_queue.clear_samples() logging.info("LIS2DW finished '%s' measurements", self.name) self.set_reg(REG_LIS2DW_FIFO_CTRL, 0x00) diff --git a/klippy/extras/mpu9250.py b/klippy/extras/mpu9250.py index 6ca300cdf..d1ef99545 100644 --- a/klippy/extras/mpu9250.py +++ b/klippy/extras/mpu9250.py @@ -59,7 +59,6 @@ class MPU9250: def __init__(self, config): self.printer = config.get_printer() adxl345.AccelCommandHelper(config, self) - self.query_rate = 0 am = {'x': (0, SCALE), 'y': (1, SCALE), 'z': (2, SCALE), '-x': (0, -SCALE), '-y': (1, -SCALE), '-z': (2, -SCALE)} axes_map = config.getlist('axes_map', ('x','y','z'), count=3) @@ -116,8 +115,6 @@ def set_reg(self, reg, val, minclock=0): self.i2c.i2c_write([reg, val & 0xFF], minclock=minclock) # Measurement collection - def is_measuring(self): - return self.query_rate > 0 def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below (x_pos, x_scale), (y_pos, y_scale), (z_pos, z_scale) = self.axes_map @@ -157,8 +154,6 @@ def _update_clock(self, minclock=0): minclock=minclock) self.clock_updater.update_clock(params) def _start_measurements(self): - if self.is_measuring(): - return # In case of miswiring, testing MPU9250 device ID prevents treating # noise or wrong signal as a correctly initialized device dev_id = self.read_reg(REG_DEVID) @@ -185,7 +180,6 @@ def _start_measurements(self): print_time = self.mcu.estimated_print_time(systime) + MIN_MSG_TIME reqclock = self.mcu.print_time_to_clock(print_time) rest_ticks = self.mcu.seconds_to_clock(4. 
/ self.data_rate) - self.query_rate = self.data_rate self.query_mpu9250_cmd.send([self.oid, reqclock, rest_ticks], reqclock=reqclock) logging.info("MPU9250 starting '%s' measurements", self.name) @@ -195,11 +189,8 @@ def _start_measurements(self): self.clock_updater.clear_duration_filter() self.last_error_count = 0 def _finish_measurements(self): - if not self.is_measuring(): - return # Halt bulk reading params = self.query_mpu9250_end_cmd.send([self.oid, 0, 0]) - self.query_rate = 0 self.bulk_queue.clear_samples() logging.info("MPU9250 finished '%s' measurements", self.name) self.set_reg(REG_PWR_MGMT_1, SET_PWR_MGMT_1_SLEEP) From 3f84501955c9eb6774f1a3ad0069dbda103bdbae Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sun, 17 Dec 2023 02:10:43 -0500 Subject: [PATCH 10/63] adxl345: Add a read_axes_map() helper function Signed-off-by: Kevin O'Connor --- klippy/extras/adxl345.py | 16 ++++++++++------ klippy/extras/lis2dw.py | 7 +------ klippy/extras/mpu9250.py | 7 +------ 3 files changed, 12 insertions(+), 18 deletions(-) diff --git a/klippy/extras/adxl345.py b/klippy/extras/adxl345.py index c15dca4be..be4a0eacd 100644 --- a/klippy/extras/adxl345.py +++ b/klippy/extras/adxl345.py @@ -173,6 +173,15 @@ def cmd_ACCELEROMETER_DEBUG_WRITE(self, gcmd): val = gcmd.get("VAL", minval=0, maxval=255, parser=lambda x: int(x, 0)) self.chip.set_reg(reg, val) +# Helper to read the axes_map parameter from the config +def read_axes_map(config): + am = {'x': (0, SCALE_XY), 'y': (1, SCALE_XY), 'z': (2, SCALE_Z), + '-x': (0, -SCALE_XY), '-y': (1, -SCALE_XY), '-z': (2, -SCALE_Z)} + axes_map = config.getlist('axes_map', ('x','y','z'), count=3) + if any([a not in am for a in axes_map]): + raise config.error("Invalid axes_map parameter") + return [am[a.strip()] for a in axes_map] + MIN_MSG_TIME = 0.100 BYTES_PER_SAMPLE = 5 @@ -185,12 +194,7 @@ class ADXL345: def __init__(self, config): self.printer = config.get_printer() AccelCommandHelper(config, self) - am = {'x': (0, SCALE_XY), 'y': (1, SCALE_XY), 'z': (2, SCALE_Z), - '-x': (0, -SCALE_XY), '-y': (1, -SCALE_XY), '-z': (2, -SCALE_Z)} - axes_map = config.getlist('axes_map', ('x','y','z'), count=3) - if any([a not in am for a in axes_map]): - raise config.error("Invalid adxl345 axes_map parameter") - self.axes_map = [am[a.strip()] for a in axes_map] + self.axes_map = read_axes_map(config) self.data_rate = config.getint('rate', 3200) if self.data_rate not in QUERY_RATES: raise config.error("Invalid rate parameter: %d" % (self.data_rate,)) diff --git a/klippy/extras/lis2dw.py b/klippy/extras/lis2dw.py index 7eaf38d70..82673fc8c 100644 --- a/klippy/extras/lis2dw.py +++ b/klippy/extras/lis2dw.py @@ -42,12 +42,7 @@ class LIS2DW: def __init__(self, config): self.printer = config.get_printer() adxl345.AccelCommandHelper(config, self) - am = {'x': (0, SCALE), 'y': (1, SCALE), 'z': (2, SCALE), - '-x': (0, -SCALE), '-y': (1, -SCALE), '-z': (2, -SCALE)} - axes_map = config.getlist('axes_map', ('x','y','z'), count=3) - if any([a not in am for a in axes_map]): - raise config.error("Invalid lis2dw axes_map parameter") - self.axes_map = [am[a.strip()] for a in axes_map] + self.axes_map = adxl345.read_axes_map(config) self.data_rate = 1600 # Setup mcu sensor_lis2dw bulk query code self.spi = bus.MCU_SPI_from_config(config, 3, default_speed=5000000) diff --git a/klippy/extras/mpu9250.py b/klippy/extras/mpu9250.py index d1ef99545..d9eb242eb 100644 --- a/klippy/extras/mpu9250.py +++ b/klippy/extras/mpu9250.py @@ -59,12 +59,7 @@ class MPU9250: def __init__(self, config): 
self.printer = config.get_printer() adxl345.AccelCommandHelper(config, self) - am = {'x': (0, SCALE), 'y': (1, SCALE), 'z': (2, SCALE), - '-x': (0, -SCALE), '-y': (1, -SCALE), '-z': (2, -SCALE)} - axes_map = config.getlist('axes_map', ('x','y','z'), count=3) - if any([a not in am for a in axes_map]): - raise config.error("Invalid mpu9250 axes_map parameter") - self.axes_map = [am[a.strip()] for a in axes_map] + self.axes_map = adxl345.read_axes_map(config) self.data_rate = config.getint('rate', 4000) if self.data_rate not in SAMPLE_RATE_DIVS: raise config.error("Invalid rate parameter: %d" % (self.data_rate,)) From f4c8f0bf88743fac33bb4c3f9ea7c31bdb505467 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sun, 17 Dec 2023 02:35:08 -0500 Subject: [PATCH 11/63] angle: Define BYTES_PER_SAMPLE and SAMPLES_PER_BLOCK This makes the code a little more readable. Signed-off-by: Kevin O'Connor --- klippy/extras/angle.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/klippy/extras/angle.py b/klippy/extras/angle.py index 5bfbb6718..490209739 100644 --- a/klippy/extras/angle.py +++ b/klippy/extras/angle.py @@ -406,6 +406,9 @@ def cmd_ANGLE_DEBUG_WRITE(self, gcmd): parser=lambda x: int(x, 0)) self._write_reg(reg, val) +BYTES_PER_SAMPLE = 3 +SAMPLES_PER_BLOCK = 16 + SAMPLE_PERIOD = 0.000400 class Angle: @@ -478,18 +481,20 @@ def _extract_samples(self, raw_samples): static_delay = self.sensor_helper.get_static_delay() # Process every message in raw_samples count = error_count = 0 - samples = [None] * (len(raw_samples) * 16) + samples = [None] * (len(raw_samples) * SAMPLES_PER_BLOCK) for params in raw_samples: seq_diff = (params['sequence'] - last_sequence) & 0xffff last_sequence += seq_diff + samp_count = last_sequence * SAMPLES_PER_BLOCK + msg_mclock = start_clock + samp_count*sample_ticks d = bytearray(params['data']) - msg_mclock = start_clock + last_sequence*16*sample_ticks - for i in range(len(d) // 3): - tcode = d[i*3] + for i in range(len(d) // BYTES_PER_SAMPLE): + d_ta = d[i*BYTES_PER_SAMPLE:(i+1)*BYTES_PER_SAMPLE] + tcode = d_ta[0] if tcode == TCODE_ERROR: error_count += 1 continue - raw_angle = d[i*3 + 1] | (d[i*3 + 2] << 8) + raw_angle = d_ta[1] | (d_ta[2] << 8) angle_diff = (raw_angle - last_angle) & 0xffff angle_diff -= (angle_diff & 0x8000) << 1 last_angle += angle_diff From ffd44c02194ecabdd559cac1654aa9a65ecb2c28 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sat, 16 Dec 2023 14:07:06 -0500 Subject: [PATCH 12/63] bulk_sensor: Move APIDumpHelper() from motion_report.py to bulk_sensor.py Signed-off-by: Kevin O'Connor --- klippy/extras/adxl345.py | 4 +- klippy/extras/angle.py | 4 +- klippy/extras/bulk_sensor.py | 93 ++++++++++++++++++++++++++++++++ klippy/extras/lis2dw.py | 4 +- klippy/extras/motion_report.py | 98 ++-------------------------------- klippy/extras/mpu9250.py | 4 +- 6 files changed, 104 insertions(+), 103 deletions(-) diff --git a/klippy/extras/adxl345.py b/klippy/extras/adxl345.py index be4a0eacd..6871ef535 100644 --- a/klippy/extras/adxl345.py +++ b/klippy/extras/adxl345.py @@ -4,7 +4,7 @@ # # This file may be distributed under the terms of the GNU GPLv3 license. import logging, time, collections, multiprocessing, os -from . import bus, motion_report, bulk_sensor +from . 
import bus, bulk_sensor # ADXL345 registers REG_DEVID = 0x00 @@ -217,7 +217,7 @@ def __init__(self, config): BYTES_PER_SAMPLE) self.last_error_count = 0 # API server endpoints - self.api_dump = motion_report.APIDumpHelper( + self.api_dump = bulk_sensor.APIDumpHelper( self.printer, self._api_update, self._api_startstop, API_UPDATES) self.name = config.get_name().split()[-1] wh = self.printer.lookup_object('webhooks') diff --git a/klippy/extras/angle.py b/klippy/extras/angle.py index 490209739..066167d0b 100644 --- a/klippy/extras/angle.py +++ b/klippy/extras/angle.py @@ -4,7 +4,7 @@ # # This file may be distributed under the terms of the GNU GPLv3 license. import logging, math -from . import bus, motion_report, bulk_sensor +from . import bus, bulk_sensor MIN_MSG_TIME = 0.100 TCODE_ERROR = 0xff @@ -441,7 +441,7 @@ def __init__(self, config): mcu.register_config_callback(self._build_config) self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "spi_angle_data", oid) # API server endpoints - self.api_dump = motion_report.APIDumpHelper( + self.api_dump = bulk_sensor.APIDumpHelper( self.printer, self._api_update, self._api_startstop, 0.100) self.name = config.get_name().split()[1] wh = self.printer.lookup_object('webhooks') diff --git a/klippy/extras/bulk_sensor.py b/klippy/extras/bulk_sensor.py index 28aed48ab..8f166ec89 100644 --- a/klippy/extras/bulk_sensor.py +++ b/klippy/extras/bulk_sensor.py @@ -5,6 +5,99 @@ # This file may be distributed under the terms of the GNU GPLv3 license. import threading +API_UPDATE_INTERVAL = 0.500 + +# Helper to periodically transmit data to a set of API clients +class APIDumpHelper: + def __init__(self, printer, data_cb, startstop_cb=None, + update_interval=API_UPDATE_INTERVAL): + self.printer = printer + self.data_cb = data_cb + if startstop_cb is None: + startstop_cb = (lambda is_start: None) + self.startstop_cb = startstop_cb + self.is_started = False + self.update_interval = update_interval + self.update_timer = None + self.clients = {} + def _stop(self): + self.clients.clear() + reactor = self.printer.get_reactor() + reactor.unregister_timer(self.update_timer) + self.update_timer = None + if not self.is_started: + return reactor.NEVER + try: + self.startstop_cb(False) + except self.printer.command_error as e: + logging.exception("API Dump Helper stop callback error") + self.clients.clear() + self.is_started = False + if self.clients: + # New client started while in process of stopping + self._start() + return reactor.NEVER + def _start(self): + if self.is_started: + return + self.is_started = True + try: + self.startstop_cb(True) + except self.printer.command_error as e: + logging.exception("API Dump Helper start callback error") + self.is_started = False + self.clients.clear() + raise + reactor = self.printer.get_reactor() + systime = reactor.monotonic() + waketime = systime + self.update_interval + self.update_timer = reactor.register_timer(self._update, waketime) + def add_client(self, web_request): + cconn = web_request.get_client_connection() + template = web_request.get_dict('response_template', {}) + self.clients[cconn] = template + self._start() + def add_internal_client(self): + cconn = InternalDumpClient() + self.clients[cconn] = {} + self._start() + return cconn + def _update(self, eventtime): + try: + msg = self.data_cb(eventtime) + except self.printer.command_error as e: + logging.exception("API Dump Helper data callback error") + return self._stop() + if not msg: + return eventtime + self.update_interval + for cconn, template in 
list(self.clients.items()): + if cconn.is_closed(): + del self.clients[cconn] + if not self.clients: + return self._stop() + continue + tmp = dict(template) + tmp['params'] = msg + cconn.send(tmp) + return eventtime + self.update_interval + +# An "internal webhooks" wrapper for using APIDumpHelper internally +class InternalDumpClient: + def __init__(self): + self.msgs = [] + self.is_done = False + def get_messages(self): + return self.msgs + def finalize(self): + self.is_done = True + def is_closed(self): + return self.is_done + def send(self, msg): + self.msgs.append(msg) + if len(self.msgs) >= 10000: + # Avoid filling up memory with too many samples + self.finalize() + # Helper class to store incoming messages in a queue class BulkDataQueue: def __init__(self, mcu, msg_name, oid): diff --git a/klippy/extras/lis2dw.py b/klippy/extras/lis2dw.py index 82673fc8c..61d9add78 100644 --- a/klippy/extras/lis2dw.py +++ b/klippy/extras/lis2dw.py @@ -5,7 +5,7 @@ # # This file may be distributed under the terms of the GNU GPLv3 license. import logging -from . import bus, motion_report, adxl345, bulk_sensor +from . import bus, adxl345, bulk_sensor # LIS2DW registers REG_LIS2DW_WHO_AM_I_ADDR = 0x0F @@ -63,7 +63,7 @@ def __init__(self, config): BYTES_PER_SAMPLE) self.last_error_count = 0 # API server endpoints - self.api_dump = motion_report.APIDumpHelper( + self.api_dump = bulk_sensor.APIDumpHelper( self.printer, self._api_update, self._api_startstop, API_UPDATES) self.name = config.get_name().split()[-1] wh = self.printer.lookup_object('webhooks') diff --git a/klippy/extras/motion_report.py b/klippy/extras/motion_report.py index d32de43f6..f840f516e 100644 --- a/klippy/extras/motion_report.py +++ b/klippy/extras/motion_report.py @@ -5,99 +5,7 @@ # This file may be distributed under the terms of the GNU GPLv3 license. 
import logging import chelper - -API_UPDATE_INTERVAL = 0.500 - -# Helper to periodically transmit data to a set of API clients -class APIDumpHelper: - def __init__(self, printer, data_cb, startstop_cb=None, - update_interval=API_UPDATE_INTERVAL): - self.printer = printer - self.data_cb = data_cb - if startstop_cb is None: - startstop_cb = (lambda is_start: None) - self.startstop_cb = startstop_cb - self.is_started = False - self.update_interval = update_interval - self.update_timer = None - self.clients = {} - def _stop(self): - self.clients.clear() - reactor = self.printer.get_reactor() - reactor.unregister_timer(self.update_timer) - self.update_timer = None - if not self.is_started: - return reactor.NEVER - try: - self.startstop_cb(False) - except self.printer.command_error as e: - logging.exception("API Dump Helper stop callback error") - self.clients.clear() - self.is_started = False - if self.clients: - # New client started while in process of stopping - self._start() - return reactor.NEVER - def _start(self): - if self.is_started: - return - self.is_started = True - try: - self.startstop_cb(True) - except self.printer.command_error as e: - logging.exception("API Dump Helper start callback error") - self.is_started = False - self.clients.clear() - raise - reactor = self.printer.get_reactor() - systime = reactor.monotonic() - waketime = systime + self.update_interval - self.update_timer = reactor.register_timer(self._update, waketime) - def add_client(self, web_request): - cconn = web_request.get_client_connection() - template = web_request.get_dict('response_template', {}) - self.clients[cconn] = template - self._start() - def add_internal_client(self): - cconn = InternalDumpClient() - self.clients[cconn] = {} - self._start() - return cconn - def _update(self, eventtime): - try: - msg = self.data_cb(eventtime) - except self.printer.command_error as e: - logging.exception("API Dump Helper data callback error") - return self._stop() - if not msg: - return eventtime + self.update_interval - for cconn, template in list(self.clients.items()): - if cconn.is_closed(): - del self.clients[cconn] - if not self.clients: - return self._stop() - continue - tmp = dict(template) - tmp['params'] = msg - cconn.send(tmp) - return eventtime + self.update_interval - -# An "internal webhooks" wrapper for using APIDumpHelper internally -class InternalDumpClient: - def __init__(self): - self.msgs = [] - self.is_done = False - def get_messages(self): - return self.msgs - def finalize(self): - self.is_done = True - def is_closed(self): - return self.is_done - def send(self, msg): - self.msgs.append(msg) - if len(self.msgs) >= 10000: - # Avoid filling up memory with too many samples - self.finalize() +from . import bulk_sensor # Extract stepper queue_step messages class DumpStepper: @@ -105,7 +13,7 @@ def __init__(self, printer, mcu_stepper): self.printer = printer self.mcu_stepper = mcu_stepper self.last_api_clock = 0 - self.api_dump = APIDumpHelper(printer, self._api_update) + self.api_dump = bulk_sensor.APIDumpHelper(printer, self._api_update) wh = self.printer.lookup_object('webhooks') wh.register_mux_endpoint("motion_report/dump_stepper", "name", mcu_stepper.get_name(), self._add_api_client) @@ -168,7 +76,7 @@ def __init__(self, printer, name, trapq): self.name = name self.trapq = trapq self.last_api_msg = (0., 0.) 
- self.api_dump = APIDumpHelper(printer, self._api_update) + self.api_dump = bulk_sensor.APIDumpHelper(printer, self._api_update) wh = self.printer.lookup_object('webhooks') wh.register_mux_endpoint("motion_report/dump_trapq", "name", name, self._add_api_client) diff --git a/klippy/extras/mpu9250.py b/klippy/extras/mpu9250.py index d9eb242eb..6d5cdcf79 100644 --- a/klippy/extras/mpu9250.py +++ b/klippy/extras/mpu9250.py @@ -5,7 +5,7 @@ # # This file may be distributed under the terms of the GNU GPLv3 license. import logging, time -from . import bus, motion_report, adxl345, bulk_sensor +from . import bus, adxl345, bulk_sensor MPU9250_ADDR = 0x68 @@ -80,7 +80,7 @@ def __init__(self, config): BYTES_PER_SAMPLE) self.last_error_count = 0 # API server endpoints - self.api_dump = motion_report.APIDumpHelper( + self.api_dump = bulk_sensor.APIDumpHelper( self.printer, self._api_update, self._api_startstop, API_UPDATES) self.name = config.get_name().split()[-1] wh = self.printer.lookup_object('webhooks') From acde3720a4ee8caefacd39d39ea2e6d540e37453 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sat, 16 Dec 2023 14:31:32 -0500 Subject: [PATCH 13/63] bulk_sensor: New add_mux_endpoint() helper function in APIDumpHelper Signed-off-by: Kevin O'Connor --- klippy/extras/adxl345.py | 10 +++------- klippy/extras/angle.py | 10 +++------- klippy/extras/bulk_sensor.py | 29 +++++++++++++++++++---------- klippy/extras/lis2dw.py | 10 +++------- klippy/extras/motion_report.py | 22 +++++++--------------- klippy/extras/mpu9250.py | 10 +++------- 6 files changed, 38 insertions(+), 53 deletions(-) diff --git a/klippy/extras/adxl345.py b/klippy/extras/adxl345.py index 6871ef535..d618598a5 100644 --- a/klippy/extras/adxl345.py +++ b/klippy/extras/adxl345.py @@ -220,9 +220,9 @@ def __init__(self, config): self.api_dump = bulk_sensor.APIDumpHelper( self.printer, self._api_update, self._api_startstop, API_UPDATES) self.name = config.get_name().split()[-1] - wh = self.printer.lookup_object('webhooks') - wh.register_mux_endpoint("adxl345/dump_adxl345", "sensor", self.name, - self._handle_dump_adxl345) + hdr = ('time', 'x_acceleration', 'y_acceleration', 'z_acceleration') + self.api_dump.add_mux_endpoint("adxl345/dump_adxl345", "sensor", + self.name, {'header': hdr}) def _build_config(self): cmdqueue = self.spi.get_command_queue() self.query_adxl345_cmd = self.mcu.lookup_command( @@ -345,10 +345,6 @@ def _api_startstop(self, is_start): self._start_measurements() else: self._finish_measurements() - def _handle_dump_adxl345(self, web_request): - self.api_dump.add_client(web_request) - hdr = ('time', 'x_acceleration', 'y_acceleration', 'z_acceleration') - web_request.send({'header': hdr}) def start_internal_client(self): cconn = self.api_dump.add_internal_client() return AccelQueryHelper(self.printer, cconn) diff --git a/klippy/extras/angle.py b/klippy/extras/angle.py index 066167d0b..229e2946f 100644 --- a/klippy/extras/angle.py +++ b/klippy/extras/angle.py @@ -444,9 +444,9 @@ def __init__(self, config): self.api_dump = bulk_sensor.APIDumpHelper( self.printer, self._api_update, self._api_startstop, 0.100) self.name = config.get_name().split()[1] - wh = self.printer.lookup_object('webhooks') - wh.register_mux_endpoint("angle/dump_angle", "sensor", self.name, - self._handle_dump_angle) + api_resp = {'header': ('time', 'angle')} + self.api_dump.add_mux_endpoint("angle/dump_angle", "sensor", self.name, + api_resp) def _build_config(self): freq = self.mcu.seconds_to_clock(1.) 
while float(TCODE_ERROR << self.time_shift) / freq < 0.002: @@ -553,10 +553,6 @@ def _api_startstop(self, is_start): self._start_measurements() else: self._finish_measurements() - def _handle_dump_angle(self, web_request): - self.api_dump.add_client(web_request) - hdr = ('time', 'angle') - web_request.send({'header': hdr}) def start_internal_client(self): return self.api_dump.add_internal_client() diff --git a/klippy/extras/bulk_sensor.py b/klippy/extras/bulk_sensor.py index 8f166ec89..4c4f22ef3 100644 --- a/klippy/extras/bulk_sensor.py +++ b/klippy/extras/bulk_sensor.py @@ -20,6 +20,8 @@ def __init__(self, printer, data_cb, startstop_cb=None, self.update_interval = update_interval self.update_timer = None self.clients = {} + self.webhooks_start_resp = {} + # Periodic batch processing def _stop(self): self.clients.clear() reactor = self.printer.get_reactor() @@ -52,16 +54,6 @@ def _start(self): systime = reactor.monotonic() waketime = systime + self.update_interval self.update_timer = reactor.register_timer(self._update, waketime) - def add_client(self, web_request): - cconn = web_request.get_client_connection() - template = web_request.get_dict('response_template', {}) - self.clients[cconn] = template - self._start() - def add_internal_client(self): - cconn = InternalDumpClient() - self.clients[cconn] = {} - self._start() - return cconn def _update(self, eventtime): try: msg = self.data_cb(eventtime) @@ -80,6 +72,23 @@ def _update(self, eventtime): tmp['params'] = msg cconn.send(tmp) return eventtime + self.update_interval + # Internal clients + def add_internal_client(self): + cconn = InternalDumpClient() + self.clients[cconn] = {} + self._start() + return cconn + # Webhooks registration + def _add_api_client(self, web_request): + cconn = web_request.get_client_connection() + template = web_request.get_dict('response_template', {}) + self.clients[cconn] = template + self._start() + web_request.send(self.webhooks_start_resp) + def add_mux_endpoint(self, path, key, value, webhooks_start_resp): + self.webhooks_start_resp = webhooks_start_resp + wh = self.printer.lookup_object('webhooks') + wh.register_mux_endpoint(path, key, value, self._add_api_client) # An "internal webhooks" wrapper for using APIDumpHelper internally class InternalDumpClient: diff --git a/klippy/extras/lis2dw.py b/klippy/extras/lis2dw.py index 61d9add78..5590804ef 100644 --- a/klippy/extras/lis2dw.py +++ b/klippy/extras/lis2dw.py @@ -66,9 +66,9 @@ def __init__(self, config): self.api_dump = bulk_sensor.APIDumpHelper( self.printer, self._api_update, self._api_startstop, API_UPDATES) self.name = config.get_name().split()[-1] - wh = self.printer.lookup_object('webhooks') - wh.register_mux_endpoint("lis2dw/dump_lis2dw", "sensor", self.name, - self._handle_dump_lis2dw) + hdr = ('time', 'x_acceleration', 'y_acceleration', 'z_acceleration') + self.api_dump.add_mux_endpoint("lis2dw/dump_lis2dw", "sensor", + self.name, {'header': hdr}) def _build_config(self): cmdqueue = self.spi.get_command_queue() @@ -193,10 +193,6 @@ def _api_startstop(self, is_start): self._start_measurements() else: self._finish_measurements() - def _handle_dump_lis2dw(self, web_request): - self.api_dump.add_client(web_request) - hdr = ('time', 'x_acceleration', 'y_acceleration', 'z_acceleration') - web_request.send({'header': hdr}) def start_internal_client(self): cconn = self.api_dump.add_internal_client() return adxl345.AccelQueryHelper(self.printer, cconn) diff --git a/klippy/extras/motion_report.py b/klippy/extras/motion_report.py index 
f840f516e..b34fdc1f3 100644 --- a/klippy/extras/motion_report.py +++ b/klippy/extras/motion_report.py @@ -14,9 +14,9 @@ def __init__(self, printer, mcu_stepper): self.mcu_stepper = mcu_stepper self.last_api_clock = 0 self.api_dump = bulk_sensor.APIDumpHelper(printer, self._api_update) - wh = self.printer.lookup_object('webhooks') - wh.register_mux_endpoint("motion_report/dump_stepper", "name", - mcu_stepper.get_name(), self._add_api_client) + api_resp = {'header': ('interval', 'count', 'add')} + self.api_dump.add_mux_endpoint("motion_report/dump_stepper", "name", + mcu_stepper.get_name(), api_resp) def get_step_queue(self, start_clock, end_clock): mcu_stepper = self.mcu_stepper res = [] @@ -62,10 +62,6 @@ def _api_update(self, eventtime): "start_mcu_position": mcu_pos, "step_distance": step_dist, "first_clock": first_clock, "first_step_time": first_time, "last_clock": last_clock, "last_step_time": last_time} - def _add_api_client(self, web_request): - self.api_dump.add_client(web_request) - hdr = ('interval', 'count', 'add') - web_request.send({'header': hdr}) NEVER_TIME = 9999999999999999. @@ -77,9 +73,10 @@ def __init__(self, printer, name, trapq): self.trapq = trapq self.last_api_msg = (0., 0.) self.api_dump = bulk_sensor.APIDumpHelper(printer, self._api_update) - wh = self.printer.lookup_object('webhooks') - wh.register_mux_endpoint("motion_report/dump_trapq", "name", name, - self._add_api_client) + api_resp = {'header': ('time', 'duration', 'start_velocity', + 'acceleration', 'start_position', 'direction')} + self.api_dump.add_mux_endpoint("motion_report/dump_trapq", "name", name, + api_resp) def extract_trapq(self, start_time, end_time): ffi_main, ffi_lib = chelper.get_ffi() res = [] @@ -130,11 +127,6 @@ def _api_update(self, eventtime): return {} self.last_api_msg = d[-1] return {"data": d} - def _add_api_client(self, web_request): - self.api_dump.add_client(web_request) - hdr = ('time', 'duration', 'start_velocity', 'acceleration', - 'start_position', 'direction') - web_request.send({'header': hdr}) STATUS_REFRESH_TIME = 0.250 diff --git a/klippy/extras/mpu9250.py b/klippy/extras/mpu9250.py index 6d5cdcf79..883f399ee 100644 --- a/klippy/extras/mpu9250.py +++ b/klippy/extras/mpu9250.py @@ -83,9 +83,9 @@ def __init__(self, config): self.api_dump = bulk_sensor.APIDumpHelper( self.printer, self._api_update, self._api_startstop, API_UPDATES) self.name = config.get_name().split()[-1] - wh = self.printer.lookup_object('webhooks') - wh.register_mux_endpoint("mpu9250/dump_mpu9250", "sensor", self.name, - self._handle_dump_mpu9250) + hdr = ('time', 'x_acceleration', 'y_acceleration', 'z_acceleration') + self.api_dump.add_mux_endpoint("mpu9250/dump_mpu9250", "sensor", + self.name, {'header': hdr}) def _build_config(self): cmdqueue = self.i2c.get_command_queue() self.mcu.add_config_cmd("config_mpu9250 oid=%d i2c_oid=%d" @@ -207,10 +207,6 @@ def _api_startstop(self, is_start): self._start_measurements() else: self._finish_measurements() - def _handle_dump_mpu9250(self, web_request): - self.api_dump.add_client(web_request) - hdr = ('time', 'x_acceleration', 'y_acceleration', 'z_acceleration') - web_request.send({'header': hdr}) def start_internal_client(self): cconn = self.api_dump.add_internal_client() return adxl345.AccelQueryHelper(self.printer, cconn) From 95c753292d7cb6f577e3ed2b644c2d84c7158e4a Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sat, 16 Dec 2023 21:59:53 -0500 Subject: [PATCH 14/63] bulk_sensor: Minor code reorg to _stop() in APIDumpHelper() Signed-off-by: Kevin O'Connor 
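This reorg leans on the reactor timer contract: a registered timer callback returns either the next wake time or reactor.NEVER to stay parked, which is why _update() must now call _stop() and return NEVER itself. A minimal sketch of that pattern, assuming Klipper's reactor API as used elsewhere in this series; the PeriodicDump name is illustrative and not code from this patch:

    # Sketch of the periodic-timer lifecycle behind _start()/_stop()/_update()
    class PeriodicDump:
        def __init__(self, printer, update_interval=0.500):
            self.printer = printer
            self.update_interval = update_interval
            self.update_timer = None
            self.clients = {}
        def _start(self):
            reactor = self.printer.get_reactor()
            waketime = reactor.monotonic() + self.update_interval
            self.update_timer = reactor.register_timer(self._update, waketime)
        def _stop(self):
            self.printer.get_reactor().unregister_timer(self.update_timer)
            self.update_timer = None
        def _update(self, eventtime):
            if not self.clients:
                # Last client is gone: stop and park this timer
                self._stop()
                return self.printer.get_reactor().NEVER
            return eventtime + self.update_interval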
--- klippy/extras/bulk_sensor.py | 38 ++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/klippy/extras/bulk_sensor.py b/klippy/extras/bulk_sensor.py index 4c4f22ef3..826bf74c1 100644 --- a/klippy/extras/bulk_sensor.py +++ b/klippy/extras/bulk_sensor.py @@ -22,23 +22,6 @@ def __init__(self, printer, data_cb, startstop_cb=None, self.clients = {} self.webhooks_start_resp = {} # Periodic batch processing - def _stop(self): - self.clients.clear() - reactor = self.printer.get_reactor() - reactor.unregister_timer(self.update_timer) - self.update_timer = None - if not self.is_started: - return reactor.NEVER - try: - self.startstop_cb(False) - except self.printer.command_error as e: - logging.exception("API Dump Helper stop callback error") - self.clients.clear() - self.is_started = False - if self.clients: - # New client started while in process of stopping - self._start() - return reactor.NEVER def _start(self): if self.is_started: return @@ -54,19 +37,36 @@ def _start(self): systime = reactor.monotonic() waketime = systime + self.update_interval self.update_timer = reactor.register_timer(self._update, waketime) + def _stop(self): + self.clients.clear() + self.printer.get_reactor().unregister_timer(self.update_timer) + self.update_timer = None + if not self.is_started: + return + try: + self.startstop_cb(False) + except self.printer.command_error as e: + logging.exception("API Dump Helper stop callback error") + self.clients.clear() + self.is_started = False + if self.clients: + # New client started while in process of stopping + self._start() def _update(self, eventtime): try: msg = self.data_cb(eventtime) except self.printer.command_error as e: logging.exception("API Dump Helper data callback error") - return self._stop() + self._stop() + return self.printer.get_reactor().NEVER if not msg: return eventtime + self.update_interval for cconn, template in list(self.clients.items()): if cconn.is_closed(): del self.clients[cconn] if not self.clients: - return self._stop() + self._stop() + return self.printer.get_reactor().NEVER continue tmp = dict(template) tmp['params'] = msg From 337013459303a220e1c3552583676c35b4800dd0 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sat, 16 Dec 2023 23:26:42 -0500 Subject: [PATCH 15/63] bulk_sensor: Rework APIDumpHelper() to BatchBulkHelper() The APIDumpHelper class is mainly intended to help process messages in batches. Rework the class methods to make that more clear. 
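As a rough sketch of the resulting interface (illustrative only; MySensor and its callbacks are placeholders, not code from this patch), a sensor module now passes its batch callback and separate start/stop callbacks to the helper:

    from . import bulk_sensor

    class MySensor:
        def __init__(self, config):
            self.printer = config.get_printer()
            self.name = config.get_name().split()[-1]
            # Batch callback plus separate start and stop callbacks
            self.batch_bulk = bulk_sensor.BatchBulkHelper(
                self.printer, self._process_batch,
                self._start_measurements, self._finish_measurements, 0.100)
            hdr = ('time', 'value')
            self.batch_bulk.add_mux_endpoint("my_sensor/dump", "sensor",
                                             self.name, {'header': hdr})
        def _start_measurements(self):
            pass  # begin sampling (e.g. send a query command to the mcu)
        def _finish_measurements(self):
            pass  # halt sampling
        def _process_batch(self, eventtime):
            # Return {} when there is nothing to report for this interval
            return {'data': [], 'errors': 0}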
Signed-off-by: Kevin O'Connor --- klippy/extras/adxl345.py | 32 +++++++++----------- klippy/extras/angle.py | 51 ++++++++++++++++---------------- klippy/extras/bulk_sensor.py | 53 ++++++++++++++++++---------------- klippy/extras/lis2dw.py | 33 +++++++++------------ klippy/extras/motion_report.py | 32 ++++++++++---------- klippy/extras/mpu9250.py | 35 +++++++++------------- 6 files changed, 112 insertions(+), 124 deletions(-) diff --git a/klippy/extras/adxl345.py b/klippy/extras/adxl345.py index d618598a5..1dfb6bc70 100644 --- a/klippy/extras/adxl345.py +++ b/klippy/extras/adxl345.py @@ -187,7 +187,7 @@ def read_axes_map(config): BYTES_PER_SAMPLE = 5 SAMPLES_PER_BLOCK = 10 -API_UPDATES = 0.100 +BATCH_UPDATES = 0.100 # Printer class that controls ADXL345 chip class ADXL345: @@ -211,18 +211,19 @@ def __init__(self, config): mcu.register_config_callback(self._build_config) self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "adxl345_data", oid) # Clock tracking - chip_smooth = self.data_rate * API_UPDATES * 2 + chip_smooth = self.data_rate * BATCH_UPDATES * 2 self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth) self.clock_updater = bulk_sensor.ChipClockUpdater(self.clock_sync, BYTES_PER_SAMPLE) self.last_error_count = 0 - # API server endpoints - self.api_dump = bulk_sensor.APIDumpHelper( - self.printer, self._api_update, self._api_startstop, API_UPDATES) + # Process messages in batches + self.batch_bulk = bulk_sensor.BatchBulkHelper( + self.printer, self._process_batch, + self._start_measurements, self._finish_measurements, BATCH_UPDATES) self.name = config.get_name().split()[-1] hdr = ('time', 'x_acceleration', 'y_acceleration', 'z_acceleration') - self.api_dump.add_mux_endpoint("adxl345/dump_adxl345", "sensor", - self.name, {'header': hdr}) + self.batch_bulk.add_mux_endpoint("adxl345/dump_adxl345", "sensor", + self.name, {'header': hdr}) def _build_config(self): cmdqueue = self.spi.get_command_queue() self.query_adxl345_cmd = self.mcu.lookup_command( @@ -248,7 +249,10 @@ def set_reg(self, reg, val, minclock=0): "This is generally indicative of connection problems " "(e.g. faulty wiring) or a faulty adxl345 chip." 
% ( reg, val, stored_val)) - # Measurement collection + def start_internal_client(self): + cconn = self.batch_bulk.add_internal_client() + return AccelQueryHelper(self.printer, cconn) + # Measurement decoding def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below (x_pos, x_scale), (y_pos, y_scale), (z_pos, z_scale) = self.axes_map @@ -294,6 +298,7 @@ def _update_clock(self, minclock=0): else: raise self.printer.command_error("Unable to query adxl345 fifo") self.clock_updater.update_clock(params) + # Start, stop, and process message batches def _start_measurements(self): # In case of miswiring, testing ADXL345 device ID prevents treating # noise or wrong signal as a correctly initialized device @@ -329,8 +334,7 @@ def _finish_measurements(self): params = self.query_adxl345_end_cmd.send([self.oid, 0, 0]) self.bulk_queue.clear_samples() logging.info("ADXL345 finished '%s' measurements", self.name) - # API interface - def _api_update(self, eventtime): + def _process_batch(self, eventtime): self._update_clock() raw_samples = self.bulk_queue.pull_samples() if not raw_samples: @@ -340,14 +344,6 @@ def _api_update(self, eventtime): return {} return {'data': samples, 'errors': self.last_error_count, 'overflows': self.clock_updater.get_last_limit_count()} - def _api_startstop(self, is_start): - if is_start: - self._start_measurements() - else: - self._finish_measurements() - def start_internal_client(self): - cconn = self.api_dump.add_internal_client() - return AccelQueryHelper(self.printer, cconn) def load_config(config): return ADXL345(config) diff --git a/klippy/extras/angle.py b/klippy/extras/angle.py index 229e2946f..0fe053df3 100644 --- a/klippy/extras/angle.py +++ b/klippy/extras/angle.py @@ -410,6 +410,7 @@ def cmd_ANGLE_DEBUG_WRITE(self, gcmd): SAMPLES_PER_BLOCK = 16 SAMPLE_PERIOD = 0.000400 +BATCH_UPDATES = 0.100 class Angle: def __init__(self, config): @@ -440,13 +441,14 @@ def __init__(self, config): % (oid,), on_restart=True) mcu.register_config_callback(self._build_config) self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "spi_angle_data", oid) - # API server endpoints - self.api_dump = bulk_sensor.APIDumpHelper( - self.printer, self._api_update, self._api_startstop, 0.100) + # Process messages in batches + self.batch_bulk = bulk_sensor.BatchBulkHelper( + self.printer, self._process_batch, + self._start_measurements, self._finish_measurements, BATCH_UPDATES) self.name = config.get_name().split()[1] api_resp = {'header': ('time', 'angle')} - self.api_dump.add_mux_endpoint("angle/dump_angle", "sensor", self.name, - api_resp) + self.batch_bulk.add_mux_endpoint("angle/dump_angle", + "sensor", self.name, api_resp) def _build_config(self): freq = self.mcu.seconds_to_clock(1.) 
while float(TCODE_ERROR << self.time_shift) / freq < 0.002: @@ -460,7 +462,9 @@ def _build_config(self): "spi_angle_end oid=%c sequence=%hu", oid=self.oid, cq=cmdqueue) def get_status(self, eventtime=None): return {'temperature': self.sensor_helper.last_temperature} - # Measurement collection + def start_internal_client(self): + return self.batch_bulk.add_internal_client() + # Measurement decoding def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below sample_ticks = self.sample_ticks @@ -516,19 +520,9 @@ def _extract_samples(self, raw_samples): self.last_angle = last_angle del samples[count:] return samples, error_count - # API interface - def _api_update(self, eventtime): - if self.sensor_helper.is_tcode_absolute: - self.sensor_helper.update_clock() - raw_samples = self.bulk_queue.pull_samples() - if not raw_samples: - return {} - samples, error_count = self._extract_samples(raw_samples) - if not samples: - return {} - offset = self.calibration.apply_calibration(samples) - return {'data': samples, 'errors': error_count, - 'position_offset': offset} + # Start, stop, and process message batches + def _is_measuring(self): + return self.start_clock != 0 def _start_measurements(self): logging.info("Starting angle '%s' measurements", self.name) self.sensor_helper.start() @@ -548,13 +542,18 @@ def _finish_measurements(self): self.bulk_queue.clear_samples() self.sensor_helper.last_temperature = None logging.info("Stopped angle '%s' measurements", self.name) - def _api_startstop(self, is_start): - if is_start: - self._start_measurements() - else: - self._finish_measurements() - def start_internal_client(self): - return self.api_dump.add_internal_client() + def _process_batch(self, eventtime): + if self.sensor_helper.is_tcode_absolute: + self.sensor_helper.update_clock() + raw_samples = self.bulk_queue.pull_samples() + if not raw_samples: + return {} + samples, error_count = self._extract_samples(raw_samples) + if not samples: + return {} + offset = self.calibration.apply_calibration(samples) + return {'data': samples, 'errors': error_count, + 'position_offset': offset} def load_config_prefix(config): return Angle(config) diff --git a/klippy/extras/bulk_sensor.py b/klippy/extras/bulk_sensor.py index 826bf74c1..95f6201e3 100644 --- a/klippy/extras/bulk_sensor.py +++ b/klippy/extras/bulk_sensor.py @@ -5,20 +5,23 @@ # This file may be distributed under the terms of the GNU GPLv3 license. 
import threading -API_UPDATE_INTERVAL = 0.500 +BATCH_INTERVAL = 0.500 -# Helper to periodically transmit data to a set of API clients -class APIDumpHelper: - def __init__(self, printer, data_cb, startstop_cb=None, - update_interval=API_UPDATE_INTERVAL): +# Helper to process accumulated messages in periodic batches +class BatchBulkHelper: + def __init__(self, printer, batch_cb, start_cb=None, stop_cb=None, + batch_interval=BATCH_INTERVAL): self.printer = printer - self.data_cb = data_cb - if startstop_cb is None: - startstop_cb = (lambda is_start: None) - self.startstop_cb = startstop_cb + self.batch_cb = batch_cb + if start_cb is None: + start_cb = (lambda: None) + self.start_cb = start_cb + if stop_cb is None: + stop_cb = (lambda: None) + self.stop_cb = stop_cb self.is_started = False - self.update_interval = update_interval - self.update_timer = None + self.batch_interval = batch_interval + self.batch_timer = None self.clients = {} self.webhooks_start_resp = {} # Periodic batch processing @@ -27,40 +30,40 @@ def _start(self): return self.is_started = True try: - self.startstop_cb(True) + self.start_cb() except self.printer.command_error as e: - logging.exception("API Dump Helper start callback error") + logging.exception("BatchBulkHelper start callback error") self.is_started = False self.clients.clear() raise reactor = self.printer.get_reactor() systime = reactor.monotonic() - waketime = systime + self.update_interval - self.update_timer = reactor.register_timer(self._update, waketime) + waketime = systime + self.batch_interval + self.batch_timer = reactor.register_timer(self._proc_batch, waketime) def _stop(self): self.clients.clear() - self.printer.get_reactor().unregister_timer(self.update_timer) - self.update_timer = None + self.printer.get_reactor().unregister_timer(self.batch_timer) + self.batch_timer = None if not self.is_started: return try: - self.startstop_cb(False) + self.stop_cb() except self.printer.command_error as e: - logging.exception("API Dump Helper stop callback error") + logging.exception("BatchBulkHelper stop callback error") self.clients.clear() self.is_started = False if self.clients: # New client started while in process of stopping self._start() - def _update(self, eventtime): + def _proc_batch(self, eventtime): try: - msg = self.data_cb(eventtime) + msg = self.batch_cb(eventtime) except self.printer.command_error as e: - logging.exception("API Dump Helper data callback error") + logging.exception("BatchBulkHelper batch callback error") self._stop() return self.printer.get_reactor().NEVER if not msg: - return eventtime + self.update_interval + return eventtime + self.batch_interval for cconn, template in list(self.clients.items()): if cconn.is_closed(): del self.clients[cconn] @@ -71,7 +74,7 @@ def _update(self, eventtime): tmp = dict(template) tmp['params'] = msg cconn.send(tmp) - return eventtime + self.update_interval + return eventtime + self.batch_interval # Internal clients def add_internal_client(self): cconn = InternalDumpClient() @@ -90,7 +93,7 @@ def add_mux_endpoint(self, path, key, value, webhooks_start_resp): wh = self.printer.lookup_object('webhooks') wh.register_mux_endpoint(path, key, value, self._add_api_client) -# An "internal webhooks" wrapper for using APIDumpHelper internally +# An "internal webhooks" wrapper for using BatchBulkHelper internally class InternalDumpClient: def __init__(self): self.msgs = [] diff --git a/klippy/extras/lis2dw.py b/klippy/extras/lis2dw.py index 5590804ef..739c3641d 100644 --- a/klippy/extras/lis2dw.py +++ 
b/klippy/extras/lis2dw.py @@ -35,7 +35,7 @@ BYTES_PER_SAMPLE = 6 SAMPLES_PER_BLOCK = 8 -API_UPDATES = 0.100 +BATCH_UPDATES = 0.100 # Printer class that controls LIS2DW chip class LIS2DW: @@ -57,18 +57,19 @@ def __init__(self, config): mcu.register_config_callback(self._build_config) self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "lis2dw_data", oid) # Clock tracking - chip_smooth = self.data_rate * API_UPDATES * 2 + chip_smooth = self.data_rate * BATCH_UPDATES * 2 self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth) self.clock_updater = bulk_sensor.ChipClockUpdater(self.clock_sync, BYTES_PER_SAMPLE) self.last_error_count = 0 - # API server endpoints - self.api_dump = bulk_sensor.APIDumpHelper( - self.printer, self._api_update, self._api_startstop, API_UPDATES) + # Process messages in batches + self.batch_bulk = bulk_sensor.BatchBulkHelper( + self.printer, self._process_batch, + self._start_measurements, self._finish_measurements, BATCH_UPDATES) self.name = config.get_name().split()[-1] hdr = ('time', 'x_acceleration', 'y_acceleration', 'z_acceleration') - self.api_dump.add_mux_endpoint("lis2dw/dump_lis2dw", "sensor", - self.name, {'header': hdr}) + self.batch_bulk.add_mux_endpoint("lis2dw/dump_lis2dw", "sensor", + self.name, {'header': hdr}) def _build_config(self): cmdqueue = self.spi.get_command_queue() @@ -95,7 +96,10 @@ def set_reg(self, reg, val, minclock=0): "This is generally indicative of connection problems " "(e.g. faulty wiring) or a faulty lis2dw chip." % ( reg, val, stored_val)) - # Measurement collection + def start_internal_client(self): + cconn = self.bulk_batch.add_internal_client() + return adxl345.AccelQueryHelper(self.printer, cconn) + # Measurement decoding def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below (x_pos, x_scale), (y_pos, y_scale), (z_pos, z_scale) = self.axes_map @@ -136,6 +140,7 @@ def _update_clock(self, minclock=0): params = self.query_lis2dw_status_cmd.send([self.oid], minclock=minclock) self.clock_updater.update_clock(params) + # Start, stop, and process message batches def _start_measurements(self): # In case of miswiring, testing LIS2DW device ID prevents treating # noise or wrong signal as a correctly initialized device @@ -177,8 +182,7 @@ def _finish_measurements(self): self.bulk_queue.clear_samples() logging.info("LIS2DW finished '%s' measurements", self.name) self.set_reg(REG_LIS2DW_FIFO_CTRL, 0x00) - # API interface - def _api_update(self, eventtime): + def _process_batch(self, eventtime): self._update_clock() raw_samples = self.bulk_queue.pull_samples() if not raw_samples: @@ -188,15 +192,6 @@ def _api_update(self, eventtime): return {} return {'data': samples, 'errors': self.last_error_count, 'overflows': self.clock_updater.get_last_limit_count()} - def _api_startstop(self, is_start): - if is_start: - self._start_measurements() - else: - self._finish_measurements() - def start_internal_client(self): - cconn = self.api_dump.add_internal_client() - return adxl345.AccelQueryHelper(self.printer, cconn) - def load_config(config): return LIS2DW(config) diff --git a/klippy/extras/motion_report.py b/klippy/extras/motion_report.py index b34fdc1f3..25c1d5e6c 100644 --- a/klippy/extras/motion_report.py +++ b/klippy/extras/motion_report.py @@ -12,11 +12,12 @@ class DumpStepper: def __init__(self, printer, mcu_stepper): self.printer = printer self.mcu_stepper = mcu_stepper - self.last_api_clock = 0 - self.api_dump = bulk_sensor.APIDumpHelper(printer, self._api_update) + self.last_batch_clock = 0 + 
self.batch_bulk = bulk_sensor.BatchBulkHelper(printer, + self._process_batch) api_resp = {'header': ('interval', 'count', 'add')} - self.api_dump.add_mux_endpoint("motion_report/dump_stepper", "name", - mcu_stepper.get_name(), api_resp) + self.batch_bulk.add_mux_endpoint("motion_report/dump_stepper", "name", + mcu_stepper.get_name(), api_resp) def get_step_queue(self, start_clock, end_clock): mcu_stepper = self.mcu_stepper res = [] @@ -42,15 +43,15 @@ def log_steps(self, data): % (i, s.first_clock, s.start_position, s.interval, s.step_count, s.add)) logging.info('\n'.join(out)) - def _api_update(self, eventtime): - data, cdata = self.get_step_queue(self.last_api_clock, 1<<63) + def _process_batch(self, eventtime): + data, cdata = self.get_step_queue(self.last_batch_clock, 1<<63) if not data: return {} clock_to_print_time = self.mcu_stepper.get_mcu().clock_to_print_time first = data[0] first_clock = first.first_clock first_time = clock_to_print_time(first_clock) - self.last_api_clock = last_clock = data[-1].last_clock + self.last_batch_clock = last_clock = data[-1].last_clock last_time = clock_to_print_time(last_clock) mcu_pos = first.start_position start_position = self.mcu_stepper.mcu_to_commanded_position(mcu_pos) @@ -71,12 +72,13 @@ def __init__(self, printer, name, trapq): self.printer = printer self.name = name self.trapq = trapq - self.last_api_msg = (0., 0.) - self.api_dump = bulk_sensor.APIDumpHelper(printer, self._api_update) + self.last_batch_msg = (0., 0.) + self.batch_bulk = bulk_sensor.BatchBulkHelper(printer, + self._process_batch) api_resp = {'header': ('time', 'duration', 'start_velocity', 'acceleration', 'start_position', 'direction')} - self.api_dump.add_mux_endpoint("motion_report/dump_trapq", "name", name, - api_resp) + self.batch_bulk.add_mux_endpoint("motion_report/dump_trapq", + "name", name, api_resp) def extract_trapq(self, start_time, end_time): ffi_main, ffi_lib = chelper.get_ffi() res = [] @@ -115,17 +117,17 @@ def get_trapq_position(self, print_time): move.start_z + move.z_r * dist) velocity = move.start_v + move.accel * move_time return pos, velocity - def _api_update(self, eventtime): - qtime = self.last_api_msg[0] + min(self.last_api_msg[1], 0.100) + def _process_batch(self, eventtime): + qtime = self.last_batch_msg[0] + min(self.last_batch_msg[1], 0.100) data, cdata = self.extract_trapq(qtime, NEVER_TIME) d = [(m.print_time, m.move_t, m.start_v, m.accel, (m.start_x, m.start_y, m.start_z), (m.x_r, m.y_r, m.z_r)) for m in data] - if d and d[0] == self.last_api_msg: + if d and d[0] == self.last_batch_msg: d.pop(0) if not d: return {} - self.last_api_msg = d[-1] + self.last_batch_msg = d[-1] return {"data": d} STATUS_REFRESH_TIME = 0.250 diff --git a/klippy/extras/mpu9250.py b/klippy/extras/mpu9250.py index 883f399ee..82438ca0a 100644 --- a/klippy/extras/mpu9250.py +++ b/klippy/extras/mpu9250.py @@ -52,7 +52,7 @@ BYTES_PER_SAMPLE = 6 SAMPLES_PER_BLOCK = 8 -API_UPDATES = 0.100 +BATCH_UPDATES = 0.100 # Printer class that controls MPU9250 chip class MPU9250: @@ -74,18 +74,19 @@ def __init__(self, config): mcu.register_config_callback(self._build_config) self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "mpu9250_data", oid) # Clock tracking - chip_smooth = self.data_rate * API_UPDATES * 2 + chip_smooth = self.data_rate * BATCH_UPDATES * 2 self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth) self.clock_updater = bulk_sensor.ChipClockUpdater(self.clock_sync, BYTES_PER_SAMPLE) self.last_error_count = 0 - # API server endpoints - self.api_dump = 
bulk_sensor.APIDumpHelper( - self.printer, self._api_update, self._api_startstop, API_UPDATES) + # Process messages in batches + self.batch_bulk = bulk_sensor.BatchBulkHelper( + self.printer, self._process_batch, + self._start_measurements, self._finish_measurements, BATCH_UPDATES) self.name = config.get_name().split()[-1] hdr = ('time', 'x_acceleration', 'y_acceleration', 'z_acceleration') - self.api_dump.add_mux_endpoint("mpu9250/dump_mpu9250", "sensor", - self.name, {'header': hdr}) + self.batch_bulk.add_mux_endpoint("mpu9250/dump_mpu9250", "sensor", + self.name, {'header': hdr}) def _build_config(self): cmdqueue = self.i2c.get_command_queue() self.mcu.add_config_cmd("config_mpu9250 oid=%d i2c_oid=%d" @@ -105,11 +106,12 @@ def _build_config(self): def read_reg(self, reg): params = self.i2c.i2c_read([reg], 1) return bytearray(params['response'])[0] - def set_reg(self, reg, val, minclock=0): self.i2c.i2c_write([reg, val & 0xFF], minclock=minclock) - - # Measurement collection + def start_internal_client(self): + cconn = self.batch_bulk.add_internal_client() + return adxl345.AccelQueryHelper(self.printer, cconn) + # Measurement decoding def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below (x_pos, x_scale), (y_pos, y_scale), (z_pos, z_scale) = self.axes_map @@ -148,6 +150,7 @@ def _update_clock(self, minclock=0): params = self.query_mpu9250_status_cmd.send([self.oid], minclock=minclock) self.clock_updater.update_clock(params) + # Start, stop, and process message batches def _start_measurements(self): # In case of miswiring, testing MPU9250 device ID prevents treating # noise or wrong signal as a correctly initialized device @@ -190,9 +193,7 @@ def _finish_measurements(self): logging.info("MPU9250 finished '%s' measurements", self.name) self.set_reg(REG_PWR_MGMT_1, SET_PWR_MGMT_1_SLEEP) self.set_reg(REG_PWR_MGMT_2, SET_PWR_MGMT_2_OFF) - - # API interface - def _api_update(self, eventtime): + def _process_batch(self, eventtime): self._update_clock() raw_samples = self.bulk_queue.pull_samples() if not raw_samples: @@ -202,14 +203,6 @@ def _api_update(self, eventtime): return {} return {'data': samples, 'errors': self.last_error_count, 'overflows': self.clock_updater.get_last_limit_count()} - def _api_startstop(self, is_start): - if is_start: - self._start_measurements() - else: - self._finish_measurements() - def start_internal_client(self): - cconn = self.api_dump.add_internal_client() - return adxl345.AccelQueryHelper(self.printer, cconn) def load_config(config): return MPU9250(config) From c716edafe291a3d32700becfb67cb1504cd6902b Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sun, 17 Dec 2023 00:15:55 -0500 Subject: [PATCH 16/63] bulk_sensor: Simplify the registration of internal clients in BatchBulkHelper Previously, the BatchBulkHelper class was designed primarily to register webhook clients, and internal clients used a wrapper class that emulated a webhooks client. Change BatchBulkHelper to support regular internal callbacks, and introduce a new BatchWebhooksClient class that can translate these internal callback to webhooks client messages. This makes it easier to register internal clients that can process the bulk messages every batch interval. 
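For example (an illustrative sketch, not code from this patch), an internal client is now just a callback handed to add_client(); it returns True to keep receiving batches and False to unregister itself:

    # Sketch: collect a bounded number of message batches internally
    msgs = []
    def handle_batch(msg):
        msgs.append(msg)
        # Returning False drops this client; if it was the last client,
        # the helper stops the underlying measurements
        return len(msgs) < 10000
    batch_bulk.add_client(handle_batch)  # batch_bulk: a BatchBulkHelper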
Signed-off-by: Kevin O'Connor --- klippy/extras/adxl345.py | 43 ++++++++++++----------- klippy/extras/angle.py | 19 +++++++---- klippy/extras/bulk_sensor.py | 66 +++++++++++++++--------------------- klippy/extras/lis2dw.py | 5 +-- klippy/extras/mpu9250.py | 5 +-- 5 files changed, 69 insertions(+), 69 deletions(-) diff --git a/klippy/extras/adxl345.py b/klippy/extras/adxl345.py index 1dfb6bc70..8f40c7fec 100644 --- a/klippy/extras/adxl345.py +++ b/klippy/extras/adxl345.py @@ -32,26 +32,29 @@ # Helper class to obtain measurements class AccelQueryHelper: - def __init__(self, printer, cconn): + def __init__(self, printer): self.printer = printer - self.cconn = cconn + self.is_finished = False print_time = printer.lookup_object('toolhead').get_last_move_time() self.request_start_time = self.request_end_time = print_time - self.samples = self.raw_samples = [] + self.msgs = [] + self.samples = [] def finish_measurements(self): toolhead = self.printer.lookup_object('toolhead') self.request_end_time = toolhead.get_last_move_time() toolhead.wait_moves() - self.cconn.finalize() - def _get_raw_samples(self): - raw_samples = self.cconn.get_messages() - if raw_samples: - self.raw_samples = raw_samples - return self.raw_samples + self.is_finished = True + def handle_batch(self, msg): + if self.is_finished: + return False + if len(self.msgs) >= 10000: + # Avoid filling up memory with too many samples + return False + self.msgs.append(msg) + return True def has_valid_samples(self): - raw_samples = self._get_raw_samples() - for msg in raw_samples: - data = msg['params']['data'] + for msg in self.msgs: + data = msg['data'] first_sample_time = data[0][0] last_sample_time = data[-1][0] if (first_sample_time > self.request_end_time @@ -60,21 +63,20 @@ def has_valid_samples(self): # The time intervals [first_sample_time, last_sample_time] # and [request_start_time, request_end_time] have non-zero # intersection. It is still theoretically possible that none - # of the samples from raw_samples fall into the time interval + # of the samples from msgs fall into the time interval # [request_start_time, request_end_time] if it is too narrow # or on very heavy data losses. In practice, that interval # is at least 1 second, so this possibility is negligible. return True return False def get_samples(self): - raw_samples = self._get_raw_samples() - if not raw_samples: + if not self.msgs: return self.samples - total = sum([len(m['params']['data']) for m in raw_samples]) + total = sum([len(m['data']) for m in self.msgs]) count = 0 self.samples = samples = [None] * total - for msg in raw_samples: - for samp_time, x, y, z in msg['params']['data']: + for msg in self.msgs: + for samp_time, x, y, z in msg['data']: if samp_time < self.request_start_time: continue if samp_time > self.request_end_time: @@ -250,8 +252,9 @@ def set_reg(self, reg, val, minclock=0): "(e.g. faulty wiring) or a faulty adxl345 chip." 
% ( reg, val, stored_val)) def start_internal_client(self): - cconn = self.batch_bulk.add_internal_client() - return AccelQueryHelper(self.printer, cconn) + aqh = AccelQueryHelper(self.printer) + self.batch_bulk.add_client(aqh.handle_batch) + return aqh # Measurement decoding def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below diff --git a/klippy/extras/angle.py b/klippy/extras/angle.py index 0fe053df3..b1aa0d967 100644 --- a/klippy/extras/angle.py +++ b/klippy/extras/angle.py @@ -157,8 +157,14 @@ def get_stepper_phase(self): def do_calibration_moves(self): move = self.printer.lookup_object('force_move').manual_move # Start data collection - angle_sensor = self.printer.lookup_object(self.name) - cconn = angle_sensor.start_internal_client() + msgs = [] + is_finished = False + def handle_batch(msg): + if is_finished: + return False + msgs.append(msg) + return True + self.printer.lookup_object(self.name).add_client(handle_batch) # Move stepper several turns (to allow internal sensor calibration) microsteps, full_steps = self.get_microsteps() mcu_stepper = self.mcu_stepper @@ -190,13 +196,12 @@ def do_calibration_moves(self): move(mcu_stepper, .5*rotation_dist + align_dist, move_speed) toolhead.wait_moves() # Finish data collection - cconn.finalize() - msgs = cconn.get_messages() + is_finished = True # Correlate query responses cal = {} step = 0 for msg in msgs: - for query_time, pos in msg['params']['data']: + for query_time, pos in msg['data']: # Add to step tracking while step < len(times) and query_time > times[step][1]: step += 1 @@ -462,8 +467,8 @@ def _build_config(self): "spi_angle_end oid=%c sequence=%hu", oid=self.oid, cq=cmdqueue) def get_status(self, eventtime=None): return {'temperature': self.sensor_helper.last_temperature} - def start_internal_client(self): - return self.batch_bulk.add_internal_client() + def add_client(self, client_cb): + self.batch_bulk.add_client(client_cb) # Measurement decoding def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below diff --git a/klippy/extras/bulk_sensor.py b/klippy/extras/bulk_sensor.py index 95f6201e3..a8497afc1 100644 --- a/klippy/extras/bulk_sensor.py +++ b/klippy/extras/bulk_sensor.py @@ -22,7 +22,7 @@ def __init__(self, printer, batch_cb, start_cb=None, stop_cb=None, self.is_started = False self.batch_interval = batch_interval self.batch_timer = None - self.clients = {} + self.client_cbs = [] self.webhooks_start_resp = {} # Periodic batch processing def _start(self): @@ -34,14 +34,14 @@ def _start(self): except self.printer.command_error as e: logging.exception("BatchBulkHelper start callback error") self.is_started = False - self.clients.clear() + del self.client_cbs[:] raise reactor = self.printer.get_reactor() systime = reactor.monotonic() waketime = systime + self.batch_interval self.batch_timer = reactor.register_timer(self._proc_batch, waketime) def _stop(self): - self.clients.clear() + del self.client_cbs[:] self.printer.get_reactor().unregister_timer(self.batch_timer) self.batch_timer = None if not self.is_started: @@ -50,9 +50,9 @@ def _stop(self): self.stop_cb() except self.printer.command_error as e: logging.exception("BatchBulkHelper stop callback error") - self.clients.clear() + del self.client_cbs[:] self.is_started = False - if self.clients: + if self.client_cbs: # New client started while in process of stopping self._start() def _proc_batch(self, eventtime): @@ -64,51 +64,41 @@ def _proc_batch(self, eventtime): return self.printer.get_reactor().NEVER if 
not msg: return eventtime + self.batch_interval - for cconn, template in list(self.clients.items()): - if cconn.is_closed(): - del self.clients[cconn] - if not self.clients: + for client_cb in list(self.client_cbs): + res = client_cb(msg) + if not res: + # This client no longer needs updates - unregister it + self.client_cbs.remove(client_cb) + if not self.client_cbs: self._stop() return self.printer.get_reactor().NEVER - continue - tmp = dict(template) - tmp['params'] = msg - cconn.send(tmp) return eventtime + self.batch_interval - # Internal clients - def add_internal_client(self): - cconn = InternalDumpClient() - self.clients[cconn] = {} + # Client registration + def add_client(self, client_cb): + self.client_cbs.append(client_cb) self._start() - return cconn # Webhooks registration def _add_api_client(self, web_request): - cconn = web_request.get_client_connection() - template = web_request.get_dict('response_template', {}) - self.clients[cconn] = template - self._start() + whbatch = BatchWebhooksClient(web_request) + self.add_client(whbatch.handle_batch) web_request.send(self.webhooks_start_resp) def add_mux_endpoint(self, path, key, value, webhooks_start_resp): self.webhooks_start_resp = webhooks_start_resp wh = self.printer.lookup_object('webhooks') wh.register_mux_endpoint(path, key, value, self._add_api_client) -# An "internal webhooks" wrapper for using BatchBulkHelper internally -class InternalDumpClient: - def __init__(self): - self.msgs = [] - self.is_done = False - def get_messages(self): - return self.msgs - def finalize(self): - self.is_done = True - def is_closed(self): - return self.is_done - def send(self, msg): - self.msgs.append(msg) - if len(self.msgs) >= 10000: - # Avoid filling up memory with too many samples - self.finalize() +# A webhooks wrapper for use by BatchBulkHelper +class BatchWebhooksClient: + def __init__(self, web_request): + self.cconn = web_request.get_client_connection() + self.template = web_request.get_dict('response_template', {}) + def handle_batch(self, msg): + if self.cconn.is_closed(): + return False + tmp = dict(self.template) + tmp['params'] = msg + self.cconn.send(tmp) + return True # Helper class to store incoming messages in a queue class BulkDataQueue: diff --git a/klippy/extras/lis2dw.py b/klippy/extras/lis2dw.py index 739c3641d..28591c21b 100644 --- a/klippy/extras/lis2dw.py +++ b/klippy/extras/lis2dw.py @@ -97,8 +97,9 @@ def set_reg(self, reg, val, minclock=0): "(e.g. faulty wiring) or a faulty lis2dw chip." 
% ( reg, val, stored_val)) def start_internal_client(self): - cconn = self.bulk_batch.add_internal_client() - return adxl345.AccelQueryHelper(self.printer, cconn) + aqh = adxl345.AccelQueryHelper(self.printer) + self.batch_bulk.add_client(aqh.handle_batch) + return aqh # Measurement decoding def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below diff --git a/klippy/extras/mpu9250.py b/klippy/extras/mpu9250.py index 82438ca0a..c975f9896 100644 --- a/klippy/extras/mpu9250.py +++ b/klippy/extras/mpu9250.py @@ -109,8 +109,9 @@ def read_reg(self, reg): def set_reg(self, reg, val, minclock=0): self.i2c.i2c_write([reg, val & 0xFF], minclock=minclock) def start_internal_client(self): - cconn = self.batch_bulk.add_internal_client() - return adxl345.AccelQueryHelper(self.printer, cconn) + aqh = adxl345.AccelQueryHelper(self.printer) + self.batch_bulk.add_client(aqh.handle_batch) + return aqh # Measurement decoding def _extract_samples(self, raw_samples): # Load variables to optimize inner loop below From 6f686ddee3c65037bfcdfc93a3d8bf42a488e725 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sun, 17 Dec 2023 01:12:28 -0500 Subject: [PATCH 17/63] bulk_sensor: Add some module level documentation Signed-off-by: Kevin O'Connor --- klippy/extras/bulk_sensor.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/klippy/extras/bulk_sensor.py b/klippy/extras/bulk_sensor.py index a8497afc1..23c985233 100644 --- a/klippy/extras/bulk_sensor.py +++ b/klippy/extras/bulk_sensor.py @@ -5,6 +5,18 @@ # This file may be distributed under the terms of the GNU GPLv3 license. import threading +# This "bulk sensor" module facilitates the processing of sensor chip +# measurements that do not require the host to respond with low +# latency. This module helps collect these measurements into batches +# that are then processed periodically by the host code (as specified +# by BatchBulkHelper.batch_interval). It supports the collection of +# thousands of sensor measurements per second. +# +# Processing measurements in batches reduces load on the mcu, reduces +# bandwidth to/from the mcu, and reduces load on the host. It also +# makes it easier to export the raw measurements via the webhooks +# system (aka API Server). + BATCH_INTERVAL = 0.500 # Helper to process accumulated messages in periodic batches @@ -119,6 +131,25 @@ def pull_samples(self): def clear_samples(self): self.pull_samples() + +###################################################################### +# Clock synchronization +###################################################################### + +# It is common for sensors to produce measurements at a fixed +# frequency. If the mcu can reliably obtain all of these +# measurements, then the code here can calculate a precision timestamp +# for them. That is, it can determine the actual sensor measurement +# frequency, the time of the first measurement, and thus a precise +# time for all measurements. +# +# This system works by having the mcu periodically report a precision +# timestamp along with the total number of measurements the sensor has +# taken as of that time. In brief, knowing the total number of +# measurements taken over an extended period provides an accurate +# estimate of measurement frequency, which can then also be utilized +# to determine the time of the first measurement. + # Helper class for chip clock synchronization via linear regression class ClockSyncRegression: def __init__(self, mcu, chip_clock_smooth, decay = 1. 
/ 20.): From b502558052f40339baa04fcfcbbeb65aa77dc1d8 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Thu, 28 Dec 2023 13:13:57 -0500 Subject: [PATCH 18/63] bulk_sensor: Fix missing logging import Signed-off-by: Kevin O'Connor --- klippy/extras/bulk_sensor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/klippy/extras/bulk_sensor.py b/klippy/extras/bulk_sensor.py index 23c985233..8d0c05416 100644 --- a/klippy/extras/bulk_sensor.py +++ b/klippy/extras/bulk_sensor.py @@ -3,7 +3,7 @@ # Copyright (C) 2020-2023 Kevin O'Connor # # This file may be distributed under the terms of the GNU GPLv3 license. -import threading +import logging, threading # This "bulk sensor" module facilitates the processing of sensor chip # measurements that do not require the host to respond with low From d7f6348ae6e45e4b566d10974b10ab4bb111222b Mon Sep 17 00:00:00 2001 From: Francois Chagnon Date: Sat, 30 Dec 2023 11:34:21 -0500 Subject: [PATCH 19/63] toolhead: Keep stepcompress move history relative to current time (#6439) Expire history relative to current time rather than last move in history queue Signed-off-by: Francois Chagnon --- klippy/chelper/__init__.py | 6 ++++-- klippy/chelper/stepcompress.c | 29 +++++++++++++++++++++++------ klippy/chelper/stepcompress.h | 3 ++- klippy/chelper/trapq.c | 10 ++++------ klippy/chelper/trapq.h | 3 ++- klippy/extras/force_move.py | 3 ++- klippy/extras/manual_stepper.py | 3 ++- klippy/kinematics/extruder.py | 6 +++--- klippy/mcu.py | 7 +++++-- klippy/toolhead.py | 11 +++++++---- 10 files changed, 54 insertions(+), 27 deletions(-) diff --git a/klippy/chelper/__init__.py b/klippy/chelper/__init__.py index 290234c57..e4199561d 100644 --- a/klippy/chelper/__init__.py +++ b/klippy/chelper/__init__.py @@ -60,7 +60,8 @@ void steppersync_free(struct steppersync *ss); void steppersync_set_time(struct steppersync *ss , double time_offset, double mcu_freq); - int steppersync_flush(struct steppersync *ss, uint64_t move_clock); + int steppersync_flush(struct steppersync *ss, uint64_t move_clock + , uint64_t clear_history_clock); """ defs_itersolve = """ @@ -94,7 +95,8 @@ , double start_pos_x, double start_pos_y, double start_pos_z , double axes_r_x, double axes_r_y, double axes_r_z , double start_v, double cruise_v, double accel); - void trapq_finalize_moves(struct trapq *tq, double print_time); + void trapq_finalize_moves(struct trapq *tq, double print_time + , double clear_history_time); void trapq_set_position(struct trapq *tq, double print_time , double pos_x, double pos_y, double pos_z); int trapq_extract_old(struct trapq *tq, struct pull_move *p, int max diff --git a/klippy/chelper/stepcompress.c b/klippy/chelper/stepcompress.c index e5514b952..310f2bf31 100644 --- a/klippy/chelper/stepcompress.c +++ b/klippy/chelper/stepcompress.c @@ -54,8 +54,6 @@ struct step_move { int16_t add; }; -#define HISTORY_EXPIRE (30.0) - struct history_steps { struct list_node node; uint64_t first_clock, last_clock; @@ -292,6 +290,13 @@ free_history(struct stepcompress *sc, uint64_t end_clock) } } +// Expire the stepcompress history older than the given clock +static void +stepcompress_history_expire(struct stepcompress *sc, uint64_t end_clock) +{ + free_history(sc, end_clock); +} + // Free memory associated with a 'stepcompress' object void __visible stepcompress_free(struct stepcompress *sc) @@ -322,9 +327,6 @@ calc_last_step_print_time(struct stepcompress *sc) { double lsc = sc->last_step_clock; sc->last_step_print_time = sc->mcu_time_offset + (lsc - .5) / sc->mcu_freq; - - if 
(lsc > sc->mcu_freq * HISTORY_EXPIRE) - free_history(sc, lsc - sc->mcu_freq * HISTORY_EXPIRE); } // Set the conversion rate of 'print_time' to mcu clock @@ -731,6 +733,18 @@ steppersync_set_time(struct steppersync *ss, double time_offset } } +// Expire the stepcompress history before the given clock time +static void +steppersync_history_expire(struct steppersync *ss, uint64_t end_clock) +{ + int i; + for (i = 0; i < ss->sc_num; i++) + { + struct stepcompress *sc = ss->sc_list[i]; + stepcompress_history_expire(sc, end_clock); + } +} + // Implement a binary heap algorithm to track when the next available // 'struct move' in the mcu will be available static void @@ -758,7 +772,8 @@ heap_replace(struct steppersync *ss, uint64_t req_clock) // Find and transmit any scheduled steps prior to the given 'move_clock' int __visible -steppersync_flush(struct steppersync *ss, uint64_t move_clock) +steppersync_flush(struct steppersync *ss, uint64_t move_clock + , uint64_t clear_history_clock) { // Flush each stepcompress to the specified move_clock int i; @@ -806,5 +821,7 @@ steppersync_flush(struct steppersync *ss, uint64_t move_clock) // Transmit commands if (!list_empty(&msgs)) serialqueue_send_batch(ss->sq, ss->cq, &msgs); + + steppersync_history_expire(ss, clear_history_clock); return 0; } diff --git a/klippy/chelper/stepcompress.h b/klippy/chelper/stepcompress.h index bfc0dfcde..c5b40383f 100644 --- a/klippy/chelper/stepcompress.h +++ b/klippy/chelper/stepcompress.h @@ -42,6 +42,7 @@ struct steppersync *steppersync_alloc( void steppersync_free(struct steppersync *ss); void steppersync_set_time(struct steppersync *ss, double time_offset , double mcu_freq); -int steppersync_flush(struct steppersync *ss, uint64_t move_clock); +int steppersync_flush(struct steppersync *ss, uint64_t move_clock + , uint64_t clear_history_clock); #endif // stepcompress.h diff --git a/klippy/chelper/trapq.c b/klippy/chelper/trapq.c index 9b1b501b4..b9930e997 100644 --- a/klippy/chelper/trapq.c +++ b/klippy/chelper/trapq.c @@ -163,11 +163,10 @@ trapq_append(struct trapq *tq, double print_time } } -#define HISTORY_EXPIRE (30.0) - // Expire any moves older than `print_time` from the trapezoid velocity queue void __visible -trapq_finalize_moves(struct trapq *tq, double print_time) +trapq_finalize_moves(struct trapq *tq, double print_time + , double clear_history_time) { struct move *head_sentinel = list_first_entry(&tq->moves, struct move,node); struct move *tail_sentinel = list_last_entry(&tq->moves, struct move, node); @@ -190,10 +189,9 @@ trapq_finalize_moves(struct trapq *tq, double print_time) if (list_empty(&tq->history)) return; struct move *latest = list_first_entry(&tq->history, struct move, node); - double expire_time = latest->print_time + latest->move_t - HISTORY_EXPIRE; for (;;) { struct move *m = list_last_entry(&tq->history, struct move, node); - if (m == latest || m->print_time + m->move_t > expire_time) + if (m == latest || m->print_time + m->move_t > clear_history_time) break; list_del(&m->node); free(m); @@ -206,7 +204,7 @@ trapq_set_position(struct trapq *tq, double print_time , double pos_x, double pos_y, double pos_z) { // Flush all moves from trapq - trapq_finalize_moves(tq, NEVER_TIME); + trapq_finalize_moves(tq, NEVER_TIME, 0); // Prune any moves in the trapq history that were interrupted while (!list_empty(&tq->history)) { diff --git a/klippy/chelper/trapq.h b/klippy/chelper/trapq.h index bd8f4e8c2..c463f0c53 100644 --- a/klippy/chelper/trapq.h +++ b/klippy/chelper/trapq.h @@ -43,7 +43,8 @@ void 
trapq_append(struct trapq *tq, double print_time , double start_pos_x, double start_pos_y, double start_pos_z , double axes_r_x, double axes_r_y, double axes_r_z , double start_v, double cruise_v, double accel); -void trapq_finalize_moves(struct trapq *tq, double print_time); +void trapq_finalize_moves(struct trapq *tq, double print_time + , double clear_history_time); void trapq_set_position(struct trapq *tq, double print_time , double pos_x, double pos_y, double pos_z); int trapq_extract_old(struct trapq *tq, struct pull_move *p, int max diff --git a/klippy/extras/force_move.py b/klippy/extras/force_move.py index 3c05843b2..7501ea986 100644 --- a/klippy/extras/force_move.py +++ b/klippy/extras/force_move.py @@ -86,7 +86,8 @@ def manual_move(self, stepper, dist, speed, accel=0.): 0., 0., 0., axis_r, 0., 0., 0., cruise_v, accel) print_time = print_time + accel_t + cruise_t + accel_t stepper.generate_steps(print_time) - self.trapq_finalize_moves(self.trapq, print_time + 99999.9) + self.trapq_finalize_moves(self.trapq, print_time + 99999.9, + print_time + 99999.9) stepper.set_trapq(prev_trapq) stepper.set_stepper_kinematics(prev_sk) toolhead.note_kinematic_activity(print_time) diff --git a/klippy/extras/manual_stepper.py b/klippy/extras/manual_stepper.py index 223e13f1d..9f61e0298 100644 --- a/klippy/extras/manual_stepper.py +++ b/klippy/extras/manual_stepper.py @@ -67,7 +67,8 @@ def do_move(self, movepos, speed, accel, sync=True): 0., cruise_v, accel) self.next_cmd_time = self.next_cmd_time + accel_t + cruise_t + accel_t self.rail.generate_steps(self.next_cmd_time) - self.trapq_finalize_moves(self.trapq, self.next_cmd_time + 99999.9) + self.trapq_finalize_moves(self.trapq, self.next_cmd_time + 99999.9, + self.next_cmd_time + 99999.9) toolhead = self.printer.lookup_object('toolhead') toolhead.note_kinematic_activity(self.next_cmd_time) if sync: diff --git a/klippy/kinematics/extruder.py b/klippy/kinematics/extruder.py index ea422b6ec..4fe041c5b 100644 --- a/klippy/kinematics/extruder.py +++ b/klippy/kinematics/extruder.py @@ -211,8 +211,8 @@ def __init__(self, config, extruder_num): gcode.register_mux_command("ACTIVATE_EXTRUDER", "EXTRUDER", self.name, self.cmd_ACTIVATE_EXTRUDER, desc=self.cmd_ACTIVATE_EXTRUDER_help) - def update_move_time(self, flush_time): - self.trapq_finalize_moves(self.trapq, flush_time) + def update_move_time(self, flush_time, clear_history_time): + self.trapq_finalize_moves(self.trapq, flush_time, clear_history_time) def get_status(self, eventtime): sts = self.heater.get_status(eventtime) sts['can_extrude'] = self.heater.can_extrude @@ -313,7 +313,7 @@ def cmd_ACTIVATE_EXTRUDER(self, gcmd): class DummyExtruder: def __init__(self, printer): self.printer = printer - def update_move_time(self, flush_time): + def update_move_time(self, flush_time, clear_history_time): pass def check_move(self, move): raise move.move_error("Extrude when no extruder present") diff --git a/klippy/mcu.py b/klippy/mcu.py index 2d8bacc4a..ab219cae7 100644 --- a/klippy/mcu.py +++ b/klippy/mcu.py @@ -955,7 +955,7 @@ def request_move_queue_slot(self): self._reserved_move_slots += 1 def register_flush_callback(self, callback): self._flush_callbacks.append(callback) - def flush_moves(self, print_time): + def flush_moves(self, print_time, clear_history_time): if self._steppersync is None: return clock = self.print_time_to_clock(print_time) @@ -963,7 +963,10 @@ def flush_moves(self, print_time): return for cb in self._flush_callbacks: cb(print_time, clock) - ret = 
self._ffi_lib.steppersync_flush(self._steppersync, clock) + clear_history_clock = \ + max(0, self.print_time_to_clock(clear_history_time)) + ret = self._ffi_lib.steppersync_flush(self._steppersync, clock, + clear_history_clock) if ret: raise error("Internal error in MCU '%s' stepcompress" % (self._name,)) diff --git a/klippy/toolhead.py b/klippy/toolhead.py index 125ad282c..051a2c30c 100644 --- a/klippy/toolhead.py +++ b/klippy/toolhead.py @@ -195,6 +195,7 @@ def add_move(self, move): MOVE_BATCH_TIME = 0.500 STEPCOMPRESS_FLUSH_TIME = 0.050 SDS_CHECK_TIME = 0.001 # step+dir+step filter in stepcompress.c +MOVE_HISTORY_EXPIRE = 30. DRIP_SEGMENT_TIME = 0.050 DRIP_TIME = 0.100 @@ -289,13 +290,15 @@ def _advance_flush_time(self, flush_time): for sg in self.step_generators: sg(sg_flush_time) self.last_sg_flush_time = sg_flush_time + clear_history_time = self.mcu.estimated_print_time( + self.reactor.monotonic() - MOVE_HISTORY_EXPIRE) # Free trapq entries that are no longer needed free_time = sg_flush_time - self.kin_flush_delay - self.trapq_finalize_moves(self.trapq, free_time) - self.extruder.update_move_time(free_time) + self.trapq_finalize_moves(self.trapq, free_time, clear_history_time) + self.extruder.update_move_time(free_time, clear_history_time) # Flush stepcompress and mcu steppersync for m in self.all_mcus: - m.flush_moves(flush_time) + m.flush_moves(flush_time, clear_history_time) self.last_flush_time = flush_time def _advance_move_time(self, next_print_time): pt_delay = self.kin_flush_delay + STEPCOMPRESS_FLUSH_TIME @@ -522,7 +525,7 @@ def drip_move(self, newpos, speed, drip_completion): self.move_queue.flush() except DripModeEndSignal as e: self.move_queue.reset() - self.trapq_finalize_moves(self.trapq, self.reactor.NEVER) + self.trapq_finalize_moves(self.trapq, self.reactor.NEVER, 0) # Exit "Drip" state self.reactor.update_timer(self.flush_timer, self.reactor.NOW) self.flush_step_generation() From 9847b44901dfd130c0e07edc9a4ee072417af975 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sat, 30 Dec 2023 11:43:32 -0500 Subject: [PATCH 20/63] toolhead: Avoid calling reactor.monotonic() on each _advance_flush_time() Move calculation of clear_history_time to the callers of _advance_flush_time() as a minor processing optimization. 
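For illustration, a minimal standalone sketch (simplified names, not the code in this patch) of the resulting structure: the caller computes the history-expiration cutoff once and passes it into each flush of the batch loop:

    import time

    MOVE_BATCH_TIME = 0.500
    MOVE_HISTORY_EXPIRE = 30.

    def estimated_print_time(eventtime):
        # Stand-in for mcu.estimated_print_time() in this sketch
        return eventtime

    def advance_flush_time(flush_time, clear_history_time):
        # Stand-in for ToolHead._advance_flush_time(); it no longer
        # calls reactor.monotonic() itself
        print("flush to %.3f, drop history before %.3f"
              % (flush_time, clear_history_time))

    def advance_move_time(last_flush_time, want_flush_time):
        # Cutoff computed once here, outside the flush loop
        clear_history_time = (estimated_print_time(time.monotonic())
                              - MOVE_HISTORY_EXPIRE)
        flush_time = last_flush_time
        while 1:
            flush_time = min(flush_time + MOVE_BATCH_TIME, want_flush_time)
            advance_flush_time(flush_time, clear_history_time)
            if flush_time >= want_flush_time:
                break

    advance_move_time(0., 1.2)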
Signed-off-by: Kevin O'Connor --- klippy/toolhead.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/klippy/toolhead.py b/klippy/toolhead.py index 051a2c30c..3bc74cac8 100644 --- a/klippy/toolhead.py +++ b/klippy/toolhead.py @@ -281,7 +281,7 @@ def __init__(self, config): for module_name in modules: self.printer.load_object(config, module_name) # Print time and flush tracking - def _advance_flush_time(self, flush_time): + def _advance_flush_time(self, flush_time, clear_history_time): flush_time = max(flush_time, self.last_flush_time) # Generate steps via itersolve sg_flush_want = min(flush_time + STEPCOMPRESS_FLUSH_TIME, @@ -290,8 +290,6 @@ def _advance_flush_time(self, flush_time): for sg in self.step_generators: sg(sg_flush_time) self.last_sg_flush_time = sg_flush_time - clear_history_time = self.mcu.estimated_print_time( - self.reactor.monotonic() - MOVE_HISTORY_EXPIRE) # Free trapq entries that are no longer needed free_time = sg_flush_time - self.kin_flush_delay self.trapq_finalize_moves(self.trapq, free_time, clear_history_time) @@ -305,9 +303,11 @@ def _advance_move_time(self, next_print_time): flush_time = max(self.last_flush_time, self.print_time - pt_delay) self.print_time = max(self.print_time, next_print_time) want_flush_time = max(flush_time, self.print_time - pt_delay) + clear_history_time = self.mcu.estimated_print_time( + self.reactor.monotonic()) - MOVE_HISTORY_EXPIRE while 1: flush_time = min(flush_time + MOVE_BATCH_TIME, want_flush_time) - self._advance_flush_time(flush_time) + self._advance_flush_time(flush_time, clear_history_time) if flush_time >= want_flush_time: break def _calc_print_time(self): @@ -359,7 +359,7 @@ def _flush_lookahead(self): self.check_stall_time = 0. def flush_step_generation(self): self._flush_lookahead() - self._advance_flush_time(self.step_gen_time) + self._advance_flush_time(self.step_gen_time, 0.) def get_last_move_time(self): if self.special_queuing_state: self._flush_lookahead() @@ -433,7 +433,8 @@ def _flush_handler(self, eventtime): if buffer_time > BGFLUSH_LOW_TIME: return eventtime + buffer_time - BGFLUSH_LOW_TIME ftime = est_print_time + BGFLUSH_LOW_TIME + BGFLUSH_BATCH_TIME - self._advance_flush_time(min(self.need_flush_time, ftime)) + self._advance_flush_time(min(self.need_flush_time, ftime), + est_print_time - MOVE_HISTORY_EXPIRE) except: logging.exception("Exception in flush_handler") self.printer.invoke_shutdown("Exception in flush_handler") From 25bc649cd263683855e892433ef3f615903d99c7 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sat, 30 Dec 2023 12:35:42 -0500 Subject: [PATCH 21/63] toolhead: Make sure to flush history when in debug output mode When in debugging "batch mode", use the existing method of keeping the last 30 seconds of history from the furthest planned move time. This avoids keeping all moves in memory during a batch test. Signed-off-by: Kevin O'Connor --- klippy/toolhead.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/klippy/toolhead.py b/klippy/toolhead.py index 3bc74cac8..477b5fce0 100644 --- a/klippy/toolhead.py +++ b/klippy/toolhead.py @@ -240,7 +240,7 @@ def __init__(self, config): self.flush_timer = self.reactor.register_timer(self._flush_handler) self.do_kick_flush_timer = True self.last_flush_time = self.last_sg_flush_time = 0. - self.need_flush_time = self.step_gen_time = 0. + self.need_flush_time = self.step_gen_time = self.clear_history_time = 0. 
# Kinematic step generation scan window time tracking self.kin_flush_delay = SDS_CHECK_TIME self.kin_flush_times = [] @@ -281,7 +281,7 @@ def __init__(self, config): for module_name in modules: self.printer.load_object(config, module_name) # Print time and flush tracking - def _advance_flush_time(self, flush_time, clear_history_time): + def _advance_flush_time(self, flush_time): flush_time = max(flush_time, self.last_flush_time) # Generate steps via itersolve sg_flush_want = min(flush_time + STEPCOMPRESS_FLUSH_TIME, @@ -291,6 +291,9 @@ def _advance_flush_time(self, flush_time, clear_history_time): sg(sg_flush_time) self.last_sg_flush_time = sg_flush_time # Free trapq entries that are no longer needed + clear_history_time = self.clear_history_time + if not self.can_pause: + clear_history_time = flush_time - MOVE_HISTORY_EXPIRE free_time = sg_flush_time - self.kin_flush_delay self.trapq_finalize_moves(self.trapq, free_time, clear_history_time) self.extruder.update_move_time(free_time, clear_history_time) @@ -303,11 +306,9 @@ def _advance_move_time(self, next_print_time): flush_time = max(self.last_flush_time, self.print_time - pt_delay) self.print_time = max(self.print_time, next_print_time) want_flush_time = max(flush_time, self.print_time - pt_delay) - clear_history_time = self.mcu.estimated_print_time( - self.reactor.monotonic()) - MOVE_HISTORY_EXPIRE while 1: flush_time = min(flush_time + MOVE_BATCH_TIME, want_flush_time) - self._advance_flush_time(flush_time, clear_history_time) + self._advance_flush_time(flush_time) if flush_time >= want_flush_time: break def _calc_print_time(self): @@ -359,7 +360,7 @@ def _flush_lookahead(self): self.check_stall_time = 0. def flush_step_generation(self): self._flush_lookahead() - self._advance_flush_time(self.step_gen_time, 0.) + self._advance_flush_time(self.step_gen_time) def get_last_move_time(self): if self.special_queuing_state: self._flush_lookahead() @@ -433,8 +434,7 @@ def _flush_handler(self, eventtime): if buffer_time > BGFLUSH_LOW_TIME: return eventtime + buffer_time - BGFLUSH_LOW_TIME ftime = est_print_time + BGFLUSH_LOW_TIME + BGFLUSH_BATCH_TIME - self._advance_flush_time(min(self.need_flush_time, ftime), - est_print_time - MOVE_HISTORY_EXPIRE) + self._advance_flush_time(min(self.need_flush_time, ftime)) except: logging.exception("Exception in flush_handler") self.printer.invoke_shutdown("Exception in flush_handler") @@ -535,7 +535,9 @@ def stats(self, eventtime): max_queue_time = max(self.print_time, self.last_flush_time) for m in self.all_mcus: m.check_active(max_queue_time, eventtime) - buffer_time = self.print_time - self.mcu.estimated_print_time(eventtime) + est_print_time = self.mcu.estimated_print_time(eventtime) + self.clear_history_time = est_print_time - MOVE_HISTORY_EXPIRE + buffer_time = self.print_time - est_print_time is_active = buffer_time > -60. or not self.special_queuing_state if self.special_queuing_state == "Drip": buffer_time = 0. From 92fe8f15b82d7c7ccb7f8ac6552259adeac471fb Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Fri, 29 Dec 2023 14:35:36 -0500 Subject: [PATCH 22/63] buttons: Fix possible ordering issue if a callback blocks Invoke button callbacks directly from the background thread. This ensures that button notifications are delivered and delivered in the correct order. Previously, if a callback blocked, it was possible a new update could start before the previous update was completed, which could lead to lost events or out of order events. 
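The new code defers each callback with its arguments bound via lambda default parameters (c=callback, bt=btime, s=state). A standalone sketch (not Klipper code) of why that binding is needed when callbacks are queued from a loop:

    events = [("button1", 1), ("button1", 0), ("button2", 1)]

    # Late binding: every deferred call would see only the final values
    deferred_bad = [lambda: print(name, state) for name, state in events]
    # Default arguments capture the values at queueing time
    deferred_good = [lambda n=name, s=state: print(n, s)
                     for name, state in events]

    for cb in deferred_bad:
        cb()    # prints "button2 1" three times
    for cb in deferred_good:
        cb()    # prints each event with its own state, in order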
Signed-off-by: Kevin O'Connor --- klippy/extras/buttons.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/klippy/extras/buttons.py b/klippy/extras/buttons.py index 472742576..70d76a60e 100644 --- a/klippy/extras/buttons.py +++ b/klippy/extras/buttons.py @@ -1,6 +1,6 @@ # Support for button detection and callbacks # -# Copyright (C) 2018 Kevin O'Connor +# Copyright (C) 2018-2023 Kevin O'Connor # # This file may be distributed under the terms of the GNU GPLv3 license. import logging @@ -69,17 +69,17 @@ def handle_buttons_state(self, params): # Send ack to MCU self.ack_cmd.send([self.oid, new_count]) self.ack_count += new_count - # Call self.handle_button() with this event in main thread - for nb in new_buttons: - self.reactor.register_async_callback( - (lambda e, s=self, b=nb: s.handle_button(e, b))) - def handle_button(self, eventtime, button): - button ^= self.invert - changed = button ^ self.last_button - for mask, shift, callback in self.callbacks: - if changed & mask: - callback(eventtime, (button & mask) >> shift) - self.last_button = button + # Invoke callbacks with this event in main thread + btime = params['#receive_time'] + for button in new_buttons: + button ^= self.invert + changed = button ^ self.last_button + self.last_button = button + for mask, shift, callback in self.callbacks: + if changed & mask: + state = (button & mask) >> shift + self.reactor.register_async_callback( + (lambda et, c=callback, bt=btime, s=state: c(bt, s))) ###################################################################### From 0665dc89766bd85c095f18ba84028dc47def2f19 Mon Sep 17 00:00:00 2001 From: I3DBeeTech <129617321+I3DBeeTech@users.noreply.github.com> Date: Tue, 2 Jan 2024 22:01:30 +0530 Subject: [PATCH 23/63] config: I3DBEEZ9 New board (#6447) Signed-off-by: Venkata Kamesh --- config/generic-I3DBEEZ9.cfg | 223 ++++++++++++++++++++++++++++++++++++ test/klippy/printers.test | 3 + 2 files changed, 226 insertions(+) create mode 100644 config/generic-I3DBEEZ9.cfg diff --git a/config/generic-I3DBEEZ9.cfg b/config/generic-I3DBEEZ9.cfg new file mode 100644 index 000000000..abb20d86f --- /dev/null +++ b/config/generic-I3DBEEZ9.cfg @@ -0,0 +1,223 @@ +# This file contains common pin mappings for the I3DBEEZ9 V1.0. +# To use this config, the firmware should be compiled for the +# STM32F407 with a "32KiB bootloader". + +# The "make flash" command does not work on the I3DBEEZ9. Instead, +# after running "make", copy the generated "out/klipper.bin" file to a +# file named "firmware.bin" on an SD card and then restart the I3DBEEZ9 +# with that SD card. + +# See docs/Config_Reference.md for a description of parameters. 
+ +[stepper_x] +step_pin: PE9 +dir_pin: PF1 +enable_pin: !PF2 +microsteps: 16 +rotation_distance: 40 +endstop_pin: PB10 +position_endstop: 0 +position_max: 200 +homing_speed: 50 + +[stepper_y] +step_pin: PE11 +dir_pin: PE1 +enable_pin: !PD7 +microsteps: 16 +rotation_distance: 40 +endstop_pin: PE12 +position_endstop: 0 +position_max: 200 +homing_speed: 50 + +[stepper_z] +step_pin: PE13 +dir_pin: PC2 +enable_pin: !PC0 +microsteps: 16 +rotation_distance: 8 +endstop_pin: PG8 +position_endstop: 0 +position_max: 200 + +[extruder] +step_pin: PE14 +dir_pin: PA0 +enable_pin: !PC3 +microsteps: 16 +rotation_distance: 33.500 +nozzle_diameter: 0.400 +filament_diameter: 1.750 +heater_pin: PB1 # Heat0 +sensor_pin: PF4 # T1 Header +sensor_type: EPCOS 100K B57560G104F +control: pid +pid_Kp: 22.2 +pid_Ki: 1.08 +pid_Kd: 114 +min_temp: 0 +max_temp: 250 + +#[extruder1] +#step_pin: PD15 +#dir_pin: PE7 +#enable_pin: !PA3 +#heater_pin: PD14 # Heat1 +#sensor_pin: PF5 # T2 +#... + +#[extruder2] +#step_pin: PD13 +#dir_pin: PG9 +#enable_pin: !PF0 +#heater_pin: PB0 # Heat2 +#sensor_pin: PF6 # T3 +#... + +#[stepper_z1] +#step_pin: PE4 +#dir_pin: PE3 +#enable_pin: !PC13 +#microsteps: 16 +#rotation_distance: 8 +#endstop_pin: PD0 +#position_endstop: 0.5 +#position_max: 200 + +[heater_bed] +heater_pin: PD12 +sensor_pin: PF3 # T0 +sensor_type: ATC Semitec 104GT-2 +control: watermark +min_temp: 0 +max_temp: 130 + +[fan] +pin: PC8 + +[heater_fan fan1] +pin: PE5 + +#[heater_fan fan2] +#pin: PE6 + +[mcu] +serial: /dev/serial/by-id/usb-Klipper_Klipper_firmware_12345-if00 + +[printer] +kinematics: cartesian +max_velocity: 300 +max_accel: 3000 +max_z_velocity: 5 +max_z_accel: 100 + + +######################################## +# TMC2208 configuration +######################################## + +#[tmc2208 stepper_x] +#uart_pin: PA15 +#run_current: 0.800 +#stealthchop_threshold: 999999 + +#[tmc2208 stepper_y] +#uart_pin: PB8 +#run_current: 0.800 +#stealthchop_threshold: 999999 + +#[tmc2208 stepper_z] +#uart_pin: PB9 +#run_current: 0.650 +#stealthchop_threshold: 999999 + +#[tmc2208 extruder] +#uart_pin: PB3 +#run_current: 0.800 +#stealthchop_threshold: 999999 + +#[tmc2208 extruder1] +#uart_pin: PG15 +#run_current: 0.800 +#stealthchop_threshold: 999999 + +#[tmc2208 extruder2] +#uart_pin: PG12 +#run_current: 0.800 +#stealthchop_threshold: 999999 + +#[tmc2208 stepper_z1] +#uart_pin: PE2 +#run_current: 0.650 +#stealthchop_threshold: 999999 + +######################################## +# TMC2130 configuration +######################################## + +#[tmc2130 stepper_x] +#cs_pin: PA15 +#spi_bus: spi3a +##diag1_pin: PB10 +#run_current: 0.800 +#stealthchop_threshold: 999999 + +#[tmc2130 stepper_y] +#cs_pin: PB8 +#spi_bus: spi3a +##diag1_pin: PE12 +#run_current: 0.800 +#stealthchop_threshold: 999999 + +#[tmc2130 stepper_z] +#cs_pin: PB9 +#spi_bus: spi3a +##diag1_pin: PG8 +#run_current: 0.650 +#stealthchop_threshold: 999999 + +#[tmc2130 extruder] +#cs_pin: PB3 +#spi_bus: spi3a +##diag1_pin: PE15 +#run_current: 0.800 +#stealthchop_threshold: 999999 + +#[tmc2130 extruder1] +#cs_pin: PG15 +#spi_bus: spi3a +##diag1_pin: PE10 +#run_current: 0.800 +#stealthchop_threshold: 999999 + +#[tmc2130 extruder2] +#cs_pin: PG12 +#spi_bus: spi3a +##diag1_pin: PG5 +#run_current: 0.800 +#stealthchop_threshold: 999999 + +#[tmc2130 stepper_z1] +#cs_pin: PE2 +#spi_bus: spi3a +##diag1_pin: PD0 +#run_current: 0.650 +#stealthchop_threshold: 999999 + + +######################################## +# EXP1 / EXP2 (display) pins +######################################## + 
+[board_pins] +aliases: + # EXP1 header + EXP1_1=PG4, EXP1_3=PD11, EXP1_5=PG2, EXP1_7=PG6, EXP1_9=, + EXP1_2=PA8, EXP1_4=PD10, EXP1_6=PG3, EXP1_8=PG7, EXP1_10=<5V>, + # EXP2 header + EXP2_1=PB14, EXP2_3=PG10, EXP2_5=PF11, EXP2_7=PF12, EXP2_9=, + EXP2_2=PB13, EXP2_4=PB12, EXP2_6=PB15, EXP2_8=, EXP2_10=PF13 + # Pins EXP2_1, EXP2_6, EXP2_2 are also MISO, MOSI, SCK of bus "spi2" + +# See the sample-lcd.cfg file for definitions of common LCD displays. diff --git a/test/klippy/printers.test b/test/klippy/printers.test index 9edc57506..8a876f002 100644 --- a/test/klippy/printers.test +++ b/test/klippy/printers.test @@ -219,6 +219,7 @@ CONFIG ../../config/generic-bigtreetech-gtr.cfg CONFIG ../../config/generic-bigtreetech-skr-pro.cfg CONFIG ../../config/generic-bigtreetech-skr-2.cfg CONFIG ../../config/generic-flyboard.cfg +CONFIG ../../config/generic-I3DBEEZ9.cfg CONFIG ../../config/generic-mellow-fly-cdy-v3.cfg CONFIG ../../config/generic-mellow-super-infinty-hv.cfg CONFIG ../../config/generic-mks-monster8.cfg @@ -228,6 +229,8 @@ CONFIG ../../config/generic-th3d-ezboard-v2.0.cfg CONFIG ../../config/printer-biqu-b1-se-plus-2022.cfg CONFIG ../../config/printer-prusa-mini-plus-2020.cfg + + # Printers using the stm32f429 DICTIONARY stm32f429.dict CONFIG ../../config/generic-bigtreetech-octopus-v1.1.cfg From b50d6669a8b491edf07602c0528d26abe8985536 Mon Sep 17 00:00:00 2001 From: marbocub Date: Sun, 7 Jan 2024 07:26:48 +0900 Subject: [PATCH 24/63] docs: Fix typo in Bed_Mesh.md (#6450) the meaning and the illustration shows 13x9 mesh however the text was 13x8. Signed-off-by: Mitsunori YOSHIDA --- docs/Bed_Mesh.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/Bed_Mesh.md b/docs/Bed_Mesh.md index e759f961d..d2a417dd4 100644 --- a/docs/Bed_Mesh.md +++ b/docs/Bed_Mesh.md @@ -142,7 +142,7 @@ bicubic_tension: 0.2 integer pair, and also may be specified a single integer that is applied to both axes. In this example there are 4 segments along the X axis and 2 segments along the Y axis. This evaluates to 8 interpolated - points along X, 6 interpolated points along Y, which results in a 13x8 + points along X, 6 interpolated points along Y, which results in a 13x9 mesh. Note that if mesh_pps is set to 0 then mesh interpolation is disabled and the probed matrix will be sampled directly. From 1a1568c38b7b4e9bd5358eb0125d54652789d4aa Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sun, 17 Dec 2023 16:30:11 -0500 Subject: [PATCH 25/63] mpu9250: Fix incorrect use of time.sleep() It is not valid to call time.sleep() in the host python code (it could causes glitches in other processing, and it does not ensure there is a pause between operations on the mcu). Use minclock instead of time.sleep() to ensure there is a sufficient pause during chip startup. Signed-off-by: Kevin O'Connor --- klippy/extras/mpu9250.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/klippy/extras/mpu9250.py b/klippy/extras/mpu9250.py index c975f9896..04a33eb24 100644 --- a/klippy/extras/mpu9250.py +++ b/klippy/extras/mpu9250.py @@ -4,7 +4,7 @@ # Copyright (C) 2020-2021 Kevin O'Connor # # This file may be distributed under the terms of the GNU GPLv3 license. -import logging, time +import logging from . import bus, adxl345, bulk_sensor MPU9250_ADDR = 0x68 @@ -167,8 +167,12 @@ def _start_measurements(self): # Setup chip in requested query rate self.set_reg(REG_PWR_MGMT_1, SET_PWR_MGMT_1_WAKE) self.set_reg(REG_PWR_MGMT_2, SET_PWR_MGMT_2_ACCEL_ON) - time.sleep(20. 
/ 1000) # wait for accelerometer chip wake up - self.set_reg(REG_SMPLRT_DIV, SAMPLE_RATE_DIVS[self.data_rate]) + # Add 20ms pause for accelerometer chip wake up + self.read_reg(REG_DEVID) # Dummy read to ensure queues flushed + systime = self.printer.get_reactor().monotonic() + next_time = self.mcu.estimated_print_time(systime) + 0.020 + self.set_reg(REG_SMPLRT_DIV, SAMPLE_RATE_DIVS[self.data_rate], + minclock=self.mcu.print_time_to_clock(next_time)) self.set_reg(REG_CONFIG, SET_CONFIG) self.set_reg(REG_ACCEL_CONFIG, SET_ACCEL_CONFIG) self.set_reg(REG_ACCEL_CONFIG2, SET_ACCEL_CONFIG2) From 49315b3cc40443be08e1f1c77a656fc103337a33 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sun, 17 Dec 2023 16:19:21 -0500 Subject: [PATCH 26/63] sensor_mpu9250: Fix timing in command_query_mpu9250_status() Commit 80a7744b optimized the fifo tracking code. However, it introduced an error in the time tracking in command_query_mpu9250_status(). The purpose of that function is to provide a precise timestamp of the total number of messages produced at the time of that call. Thus, the returned fifo value needs to be the fifo level in the chip at the time of the call (not the value read during previous checks). Signed-off-by: Kevin O'Connor --- src/sensor_mpu9250.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/sensor_mpu9250.c b/src/sensor_mpu9250.c index 51df5a711..86508a6ea 100644 --- a/src/sensor_mpu9250.c +++ b/src/sensor_mpu9250.c @@ -65,10 +65,10 @@ get_fifo_status (struct mpu9250 *mp) uint8_t reg[] = {AR_FIFO_COUNT_H}; uint8_t msg[2]; i2c_read(mp->i2c->i2c_config, sizeof(reg), reg, sizeof(msg), msg); - msg[0] = 0x1F & msg[0]; // discard 3 MSB per datasheet - uint16_t bytes_to_read = ((uint16_t)msg[0]) << 8 | msg[1]; - if (bytes_to_read > mp->fifo_max) mp->fifo_max = bytes_to_read; - return bytes_to_read; + uint16_t fifo_bytes = ((msg[0] & 0x1f) << 8) | msg[1]; + if (fifo_bytes > mp->fifo_max) + mp->fifo_max = fifo_bytes; + return fifo_bytes; } // Event handler that wakes mpu9250_task() periodically @@ -249,14 +249,15 @@ void command_query_mpu9250_status(uint32_t *args) { struct mpu9250 *mp = oid_lookup(args[0], command_config_mpu9250); + uint8_t reg[] = {AR_FIFO_COUNT_H}; uint8_t msg[2]; + uint32_t time1 = timer_read_time(); - uint8_t reg[] = {AR_FIFO_COUNT_H}; i2c_read(mp->i2c->i2c_config, sizeof(reg), reg, sizeof(msg), msg); uint32_t time2 = timer_read_time(); - msg[0] = 0x1F & msg[0]; // discard 3 MSB - mp9250_status(mp, args[0], time1, time2, mp->fifo_pkts_bytes - / BYTES_PER_FIFO_ENTRY); + + uint16_t fifo_bytes = ((msg[0] & 0x1f) << 8) | msg[1]; + mp9250_status(mp, args[0], time1, time2, fifo_bytes / BYTES_PER_FIFO_ENTRY); } DECL_COMMAND(command_query_mpu9250_status, "query_mpu9250_status oid=%c"); From 84aa3caa45bd18363bef9683981215ca3b169654 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sun, 17 Dec 2023 17:17:16 -0500 Subject: [PATCH 27/63] sensor_mpu9250: Simplify mp9250_query() The mpu9250 code always reads from the sensor in 48 byte chunks and always sends an mpu9250_data message immediately after that. Make that more clear in the querying code. 
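This and the previous change both work from the two-byte FIFO count read at AR_FIFO_COUNT_H. A standalone sketch of that parsing and of the 48-byte block size (host-side Python for illustration, not the firmware C code):

    BYTES_PER_FIFO_ENTRY = 6
    BYTES_PER_BLOCK = 48     # 8 fifo entries per mpu9250_data message

    def parse_fifo_count(msg):
        # Top 3 bits of the high byte are discarded per the datasheet
        return ((msg[0] & 0x1f) << 8) | msg[1]

    fifo_bytes = parse_fifo_count(bytes([0x21, 0x34]))  # raw read 0x2134
    print(fifo_bytes)                          # 308 bytes pending
    print(fifo_bytes // BYTES_PER_FIFO_ENTRY)  # 51 complete entries
    print(fifo_bytes >= BYTES_PER_BLOCK)       # True - full block ready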
Signed-off-by: Kevin O'Connor --- src/sensor_mpu9250.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/src/sensor_mpu9250.c b/src/sensor_mpu9250.c index 86508a6ea..68dd502fc 100644 --- a/src/sensor_mpu9250.c +++ b/src/sensor_mpu9250.c @@ -40,6 +40,7 @@ #define FIFO_OVERFLOW_INT 0x10 #define BYTES_PER_FIFO_ENTRY 6 +#define BYTES_PER_BLOCK 48 struct mpu9250 { struct timer timer; @@ -49,7 +50,7 @@ struct mpu9250 { uint8_t flags, data_count; // msg size must be <= 255 due to Klipper api // = SAMPLES_PER_BLOCK (from mpu9250.py) * BYTES_PER_FIFO_ENTRY + 1 - uint8_t data[48]; + uint8_t data[BYTES_PER_BLOCK]; }; enum { @@ -126,28 +127,23 @@ mp9250_reschedule_timer(struct mpu9250 *mp) static void mp9250_query(struct mpu9250 *mp, uint8_t oid) { - // Find remaining space in report buffer - uint8_t data_space = sizeof(mp->data) - mp->data_count; - // If not enough bytes to fill report read MPU FIFO's fill - if (mp->fifo_pkts_bytes < data_space) { - mp->fifo_pkts_bytes = get_fifo_status(mp) / BYTES_PER_FIFO_ENTRY - * BYTES_PER_FIFO_ENTRY; - } + if (mp->fifo_pkts_bytes < BYTES_PER_BLOCK) + mp->fifo_pkts_bytes = get_fifo_status(mp); // If we have enough bytes to fill the buffer do it and send report - if (mp->fifo_pkts_bytes >= data_space) { + if (mp->fifo_pkts_bytes >= BYTES_PER_BLOCK) { uint8_t reg = AR_FIFO; - i2c_read(mp->i2c->i2c_config, sizeof(reg), ®, - data_space, &mp->data[mp->data_count]); - mp->data_count += data_space; - mp->fifo_pkts_bytes -= data_space; + i2c_read(mp->i2c->i2c_config, sizeof(reg), ® + , BYTES_PER_BLOCK, &mp->data[0]); + mp->data_count = BYTES_PER_BLOCK; + mp->fifo_pkts_bytes -= BYTES_PER_BLOCK; mp9250_report(mp, oid); } // If we have enough bytes remaining to fill another report wake again // otherwise schedule timed wakeup - if (mp->fifo_pkts_bytes > data_space) { + if (mp->fifo_pkts_bytes >= BYTES_PER_BLOCK) { sched_wake_task(&mpu9250_wake); } else if (mp->flags & AX_RUNNING) { sched_del_timer(&mp->timer); From 96ab906946ed558b961a7d8e0e56fb6aebf15316 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Wed, 20 Dec 2023 16:48:29 -0500 Subject: [PATCH 28/63] sensor_mpu9250: Check for overflows on each query_mpu9250_status command Move overflow detection from mp9250_stop() to command_query_mpu9250_status(). Currently the host ignores any contents returned from a stop request, so overflow reporting at that point has limited utility. In practice, this change will result in one additional i2c transaction to the mpu9250 device every 100ms. 
Signed-off-by: Kevin O'Connor --- src/sensor_mpu9250.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/src/sensor_mpu9250.c b/src/sensor_mpu9250.c index 68dd502fc..c535d0971 100644 --- a/src/sensor_mpu9250.c +++ b/src/sensor_mpu9250.c @@ -196,14 +196,6 @@ mp9250_stop(struct mpu9250 *mp, uint8_t oid) i2c_write(mp->i2c->i2c_config, sizeof(msg), msg); uint32_t end2_time = timer_read_time(); - // Detect if a FIFO overrun occured - uint8_t int_reg[] = {AR_INT_STATUS}; - uint8_t int_msg; - i2c_read(mp->i2c->i2c_config, sizeof(int_reg), int_reg, sizeof(int_msg), - &int_msg); - if (int_msg & FIFO_OVERFLOW_INT) - mp->limit_count++; - // Report final data if (mp->data_count > 0) mp9250_report(mp, oid); @@ -245,14 +237,24 @@ void command_query_mpu9250_status(uint32_t *args) { struct mpu9250 *mp = oid_lookup(args[0], command_config_mpu9250); + + // Detect if a FIFO overrun occurred + uint8_t int_reg[] = {AR_INT_STATUS}; + uint8_t int_msg; + i2c_read(mp->i2c->i2c_config, sizeof(int_reg), int_reg, sizeof(int_msg), + &int_msg); + if (int_msg & FIFO_OVERFLOW_INT) + mp->limit_count++; + + // Read latest FIFO count (with precise timing) uint8_t reg[] = {AR_FIFO_COUNT_H}; uint8_t msg[2]; - uint32_t time1 = timer_read_time(); i2c_read(mp->i2c->i2c_config, sizeof(reg), reg, sizeof(msg), msg); uint32_t time2 = timer_read_time(); - uint16_t fifo_bytes = ((msg[0] & 0x1f) << 8) | msg[1]; + + // Report status mp9250_status(mp, args[0], time1, time2, fifo_bytes / BYTES_PER_FIFO_ENTRY); } DECL_COMMAND(command_query_mpu9250_status, "query_mpu9250_status oid=%c"); From dab39c02cd5681d530388fbaa82d0dc7f31d2e26 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Tue, 2 Jan 2024 19:00:57 -0500 Subject: [PATCH 29/63] mcu: Stagger trsync reporting time during multi-mcu homing When multiple MCUs are involved in homing, stagger the scheduling of the trsync_state report messages from each mcu. Staggering helps spread the bandwidth, helps reduce locking contention in the host, and reduces the chance that intermittent latency could result in a communication timeout. 
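A small sketch (not Klipper source, and ignoring clock-tick rounding) of the effect of report_offset, assuming three mcus and the 10ms report interval in use at this point in the series:

    def first_report_times(start, report_interval, num_mcus):
        times = []
        for i in range(num_mcus):
            report_offset = float(i) / num_mcus
            times.append(round(start + report_interval * report_offset, 4))
        return times

    # Reports from each mcu are spread across one interval instead of
    # all arriving at the same time
    print(first_report_times(0., 0.010, 3))   # [0.0, 0.0033, 0.0067]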
Signed-off-by: Kevin O'Connor --- klippy/mcu.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/klippy/mcu.py b/klippy/mcu.py index ab219cae7..1c8245883 100644 --- a/klippy/mcu.py +++ b/klippy/mcu.py @@ -185,21 +185,24 @@ def _handle_trsync_state(self, params): self._home_end_clock = None self._trsync_trigger_cmd.send([self._oid, self.REASON_PAST_END_TIME]) - def start(self, print_time, trigger_completion, expire_timeout): + def start(self, print_time, report_offset, + trigger_completion, expire_timeout): self._trigger_completion = trigger_completion self._home_end_clock = None clock = self._mcu.print_time_to_clock(print_time) expire_ticks = self._mcu.seconds_to_clock(expire_timeout) expire_clock = clock + expire_ticks report_ticks = self._mcu.seconds_to_clock(expire_timeout * .4) + report_clock = clock + int(report_ticks * report_offset + .5) min_extend_ticks = self._mcu.seconds_to_clock(expire_timeout * .4 * .8) ffi_main, ffi_lib = chelper.get_ffi() ffi_lib.trdispatch_mcu_setup(self._trdispatch_mcu, clock, expire_clock, expire_ticks, min_extend_ticks) self._mcu.register_response(self._handle_trsync_state, "trsync_state", self._oid) - self._trsync_start_cmd.send([self._oid, clock, report_ticks, - self.REASON_COMMS_TIMEOUT], reqclock=clock) + self._trsync_start_cmd.send([self._oid, report_clock, report_ticks, + self.REASON_COMMS_TIMEOUT], + reqclock=report_clock) for s in self._steppers: self._stepper_stop_cmd.send([s.get_oid(), self._oid]) self._trsync_set_timeout_cmd.send([self._oid, expire_clock], @@ -283,8 +286,10 @@ def home_start(self, print_time, sample_time, sample_count, rest_time, expire_timeout = TRSYNC_TIMEOUT if len(self._trsyncs) == 1: expire_timeout = TRSYNC_SINGLE_MCU_TIMEOUT - for trsync in self._trsyncs: - trsync.start(print_time, self._trigger_completion, expire_timeout) + for i, trsync in enumerate(self._trsyncs): + report_offset = float(i) / len(self._trsyncs) + trsync.start(print_time, report_offset, + self._trigger_completion, expire_timeout) etrsync = self._trsyncs[0] ffi_main, ffi_lib = chelper.get_ffi() ffi_lib.trdispatch_start(self._trdispatch, etrsync.REASON_HOST_REQUEST) From 1ea9f3aa35d7232ee5d106541c5a98c4348c6e47 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Tue, 2 Jan 2024 20:20:55 -0500 Subject: [PATCH 30/63] mcu: Increase trsync_state reporting during multi-mcu homing The current code has the mcu report a trsync_state message every 10ms and expects a time extension within 25ms. However, this means that if a single mcu->host report is lost then 20ms would elapse until the next report, which would allow for only a 5ms round-trip time before a timeout error is reported. Increase the trsync_state timing so that a message is sent every 7.5ms. This increases the total number of messages per second sent from mcu to host to 133 (from 100). With this change, a single lost message would still allow for up to a 10ms round-trip time. 
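The figures above can be reproduced directly (sketch only, using the 25ms expire timeout referenced in this message):

    expire_timeout = 0.025
    for factor in (.4, .3):              # old and new report_ticks factor
        interval = expire_timeout * factor
        worst = 2 * interval             # next report after one is lost
        margin = expire_timeout - worst  # round-trip time still allowed
        print("interval %.4fs  rate %.0f/s  margin %.4fs"
              % (interval, 1. / interval, margin))
    # interval 0.0100s  rate 100/s  margin 0.0050s
    # interval 0.0075s  rate 133/s  margin 0.0100s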
Signed-off-by: Kevin O'Connor --- klippy/mcu.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/klippy/mcu.py b/klippy/mcu.py index 1c8245883..6318ff7f5 100644 --- a/klippy/mcu.py +++ b/klippy/mcu.py @@ -192,9 +192,9 @@ def start(self, print_time, report_offset, clock = self._mcu.print_time_to_clock(print_time) expire_ticks = self._mcu.seconds_to_clock(expire_timeout) expire_clock = clock + expire_ticks - report_ticks = self._mcu.seconds_to_clock(expire_timeout * .4) + report_ticks = self._mcu.seconds_to_clock(expire_timeout * .3) report_clock = clock + int(report_ticks * report_offset + .5) - min_extend_ticks = self._mcu.seconds_to_clock(expire_timeout * .4 * .8) + min_extend_ticks = int(report_ticks * .8 + .5) ffi_main, ffi_lib = chelper.get_ffi() ffi_lib.trdispatch_mcu_setup(self._trdispatch_mcu, clock, expire_clock, expire_ticks, min_extend_ticks) From 447a88eb086829fd6139fda6e41eb8dc2e6a4366 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Thu, 4 Jan 2024 19:06:00 -0500 Subject: [PATCH 31/63] docs: Update Multi_MCU_Homing.md to note the importance of low-latency Signed-off-by: Kevin O'Connor --- docs/Multi_MCU_Homing.md | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/docs/Multi_MCU_Homing.md b/docs/Multi_MCU_Homing.md index 22d2508e4..c32a5947c 100644 --- a/docs/Multi_MCU_Homing.md +++ b/docs/Multi_MCU_Homing.md @@ -31,9 +31,15 @@ overshoot and account for it in its calculations. However, it is important that the hardware design is capable of handling overshoot without causing damage to the machine. -Should Klipper detect a communication issue between micro-controllers -during multi-mcu homing then it will raise a "Communication timeout -during homing" error. +In order to use this "multi-mcu homing" capability the hardware must +have predictably low latency between the host computer and all of the +micro-controllers. Typically the round-trip time must be consistently +less than 10ms. High latency (even for short periods) is likely to +result in homing failures. + +Should high latency result in a failure (or if some other +communication issue is detected) then Klipper will raise a +"Communication timeout during homing" error. Note that an axis with multiple steppers (eg, `stepper_z` and `stepper_z1`) need to be on the same micro-controller in order to use From 3d3b87f97fc91d93ae4198b36198262207e1202b Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Tue, 16 Jan 2024 20:40:55 -0500 Subject: [PATCH 32/63] toolhead: Ensure full kin_flush_delay after flush_step_generation() Commit b7b13588 made it possible that the kinematic code could be restarted after a flush_step_generation() call without a sufficient delay. Rename last_sg_flush_time to min_restart_time and use that to ensure _calc_print_time() always pauses kin_flush_delay time since the last flush_step_generation() call. Also, update force_move to invoke flush_step_generation() after any movements. This is needed to ensure there is a sufficient delay should force_move be called on a stepper motor that is part of the toolhead kinematics and is using a step generation "scan time". This fixes possible "internal error in stepcompress" reports when using FORCE_MOVE. 
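A simplified standalone sketch (not the actual ToolHead code) of how min_restart_time now feeds into _calc_print_time(), with example numbers:

    MIN_KIN_TIME = 0.100
    BUFFER_TIME_START = 0.250

    def calc_print_time(est_print_time, min_restart_time, kin_flush_delay):
        # Never restart sooner than min_restart_time plus the step
        # generation scan window
        kin_time = max(est_print_time + MIN_KIN_TIME, min_restart_time)
        kin_time += kin_flush_delay
        return max(est_print_time + BUFFER_TIME_START, kin_time)

    # A restart requested right after a flush_step_generation() that
    # ended at 10.5s cannot be scheduled before 10.5s + kin_flush_delay
    print(calc_print_time(10.0, 10.5, 0.001))   # 10.501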
Signed-off-by: Kevin O'Connor --- klippy/extras/force_move.py | 2 +- klippy/toolhead.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/klippy/extras/force_move.py b/klippy/extras/force_move.py index 7501ea986..9cf495191 100644 --- a/klippy/extras/force_move.py +++ b/klippy/extras/force_move.py @@ -90,8 +90,8 @@ def manual_move(self, stepper, dist, speed, accel=0.): print_time + 99999.9) stepper.set_trapq(prev_trapq) stepper.set_stepper_kinematics(prev_sk) - toolhead.note_kinematic_activity(print_time) toolhead.dwell(accel_t + cruise_t + accel_t) + toolhead.flush_step_generation() def _lookup_stepper(self, gcmd): name = gcmd.get('STEPPER') if name not in self.steppers: diff --git a/klippy/toolhead.py b/klippy/toolhead.py index 477b5fce0..55a39eff1 100644 --- a/klippy/toolhead.py +++ b/klippy/toolhead.py @@ -239,7 +239,7 @@ def __init__(self, config): # Flush tracking self.flush_timer = self.reactor.register_timer(self._flush_handler) self.do_kick_flush_timer = True - self.last_flush_time = self.last_sg_flush_time = 0. + self.last_flush_time = self.min_restart_time = 0. self.need_flush_time = self.step_gen_time = self.clear_history_time = 0. # Kinematic step generation scan window time tracking self.kin_flush_delay = SDS_CHECK_TIME @@ -289,7 +289,7 @@ def _advance_flush_time(self, flush_time): sg_flush_time = max(sg_flush_want, flush_time) for sg in self.step_generators: sg(sg_flush_time) - self.last_sg_flush_time = sg_flush_time + self.min_restart_time = max(self.min_restart_time, sg_flush_time) # Free trapq entries that are no longer needed clear_history_time = self.clear_history_time if not self.can_pause: @@ -314,7 +314,7 @@ def _advance_move_time(self, next_print_time): def _calc_print_time(self): curtime = self.reactor.monotonic() est_print_time = self.mcu.estimated_print_time(curtime) - kin_time = max(est_print_time + MIN_KIN_TIME, self.last_sg_flush_time) + kin_time = max(est_print_time + MIN_KIN_TIME, self.min_restart_time) kin_time += self.kin_flush_delay min_print_time = max(est_print_time + BUFFER_TIME_START, kin_time) if min_print_time > self.print_time: @@ -361,6 +361,7 @@ def _flush_lookahead(self): def flush_step_generation(self): self._flush_lookahead() self._advance_flush_time(self.step_gen_time) + self.min_restart_time = max(self.min_restart_time, self.print_time) def get_last_move_time(self): if self.special_queuing_state: self._flush_lookahead() From 7a74888b43a7e640a32fd18ae69d9dbdeaf55719 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Wed, 17 Jan 2024 11:22:16 -0500 Subject: [PATCH 33/63] toolhead: Extend flushing slightly past required time There is no harm in enabling flushing for a little longer than necessary. In contrast, a slight rounding issue causing a message to not get flushed properly could result in an error. So, extend the flushing time slightly to avoid potential issues. 
Signed-off-by: Kevin O'Connor --- klippy/extras/pwm_tool.py | 4 +--- klippy/toolhead.py | 8 +++++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/klippy/extras/pwm_tool.py b/klippy/extras/pwm_tool.py index 5fc09eab9..704266a85 100644 --- a/klippy/extras/pwm_tool.py +++ b/klippy/extras/pwm_tool.py @@ -6,7 +6,6 @@ import chelper MAX_SCHEDULE_TIME = 5.0 -CLOCK_SYNC_EXTRA_TIME = 0.050 class error(Exception): pass @@ -118,8 +117,7 @@ def _send_update(self, clock, val): # Continue flushing to resend time wakeclock += self._duration_ticks wake_print_time = self._mcu.clock_to_print_time(wakeclock) - self._toolhead.note_kinematic_activity(wake_print_time - + CLOCK_SYNC_EXTRA_TIME) + self._toolhead.note_kinematic_activity(wake_print_time) def set_pwm(self, print_time, value): clock = self._mcu.print_time_to_clock(print_time) if self._invert: diff --git a/klippy/toolhead.py b/klippy/toolhead.py index 55a39eff1..0d609a4a5 100644 --- a/klippy/toolhead.py +++ b/klippy/toolhead.py @@ -1,6 +1,6 @@ # Code for coordinating events on the printer toolhead # -# Copyright (C) 2016-2021 Kevin O'Connor +# Copyright (C) 2016-2024 Kevin O'Connor # # This file may be distributed under the terms of the GNU GPLv3 license. import math, logging, importlib @@ -191,6 +191,7 @@ def add_move(self, move): BUFFER_TIME_START = 0.250 BGFLUSH_LOW_TIME = 0.200 BGFLUSH_BATCH_TIME = 0.200 +BGFLUSH_EXTRA_TIME = 0.250 MIN_KIN_TIME = 0.100 MOVE_BATCH_TIME = 0.500 STEPCOMPRESS_FLUSH_TIME = 0.050 @@ -428,14 +429,15 @@ def _flush_handler(self, eventtime): self.check_stall_time = self.print_time # In "NeedPrime"/"Priming" state - flush queues if needed while 1: - if self.last_flush_time >= self.need_flush_time: + end_flush = self.need_flush_time + BGFLUSH_EXTRA_TIME + if self.last_flush_time >= end_flush: self.do_kick_flush_timer = True return self.reactor.NEVER buffer_time = self.last_flush_time - est_print_time if buffer_time > BGFLUSH_LOW_TIME: return eventtime + buffer_time - BGFLUSH_LOW_TIME ftime = est_print_time + BGFLUSH_LOW_TIME + BGFLUSH_BATCH_TIME - self._advance_flush_time(min(self.need_flush_time, ftime)) + self._advance_flush_time(min(end_flush, ftime)) except: logging.exception("Exception in flush_handler") self.printer.invoke_shutdown("Exception in flush_handler") From 43d0dba4b48ae7a9fcdfec2dabf053440d908fa1 Mon Sep 17 00:00:00 2001 From: grnbrg Date: Thu, 18 Jan 2024 09:13:54 -0600 Subject: [PATCH 34/63] config: Add Creality Ender 5 S1. (#6455) Creality released the Ender 5 S1 model in November of 2022. It has enough hardware differences from the previous models that that the existing Ender 5 configs are not compatible. This configuration is based on one provided by Creality that was then tweaked and modified. I have been using these values (plus some additional entries) for about 6 months with no issues. Signed-off-by: Brian Greenberg --- config/printer-creality-ender5-s1-2023.cfg | 170 +++++++++++++++++++++ test/klippy/printers.test | 1 + 2 files changed, 171 insertions(+) create mode 100644 config/printer-creality-ender5-s1-2023.cfg diff --git a/config/printer-creality-ender5-s1-2023.cfg b/config/printer-creality-ender5-s1-2023.cfg new file mode 100644 index 000000000..68a89fa5e --- /dev/null +++ b/config/printer-creality-ender5-s1-2023.cfg @@ -0,0 +1,170 @@ +# Creality Ender 5 S1 (HW version: CR4NS200141C13) +# +# printer_size: 220x220x280 +# To use this config, during "make menuconfig" select the STM32F401 +# with a "64KiB bootloader" and serial (on USART1 PA10/PA9) +# communication. 
+# +# Flash this firmware by creating a directory named "STM32F4_UPDATE" +# on an SD card, copying the "out/klipper.bin" to it and then turn +# on the printer with the card inserted. The firmware filename must +# end in ".bin" and must not match the last filename that was flashed. +# +# See docs/Config_Reference.md for a description of parameters. + +[stepper_x] +step_pin: PC2 +dir_pin: !PB9 +enable_pin: !PC3 +rotation_distance: 40 +microsteps: 16 +endstop_pin: !PA5 +position_endstop: 220 +position_max: 222 +homing_speed: 80 + +[stepper_y] +step_pin: PB8 +dir_pin: !PB7 +enable_pin: !PC3 +rotation_distance: 40 +microsteps: 16 +endstop_pin: !PA6 +position_endstop: 220 +position_max: 220 +homing_speed: 80 + +[stepper_z] +step_pin: PB6 +dir_pin: PB5 +enable_pin: !PC3 +rotation_distance: 8 +microsteps: 16 +endstop_pin: probe:z_virtual_endstop +position_max: 280 +homing_speed: 20 +second_homing_speed: 1 +homing_retract_dist: 2.0 + +[extruder] +step_pin: PB4 +dir_pin: PB3 +enable_pin: !PC3 +rotation_distance: 7.5 +microsteps: 16 +nozzle_diameter: 0.400 +filament_diameter: 1.750 +heater_pin: PA1 +sensor_type: EPCOS 100K B57560G104F +sensor_pin: PC5 +control: pid # tuned for stock hardware with 210 degree Celsius target +pid_kp: 20.749 +pid_ki: 1.064 +pid_kd: 101.153 +min_temp: 0 +max_temp: 305 + +[heater_bed] +heater_pin: PA7 +sensor_type: EPCOS 100K B57560G104F +sensor_pin: PC4 +control: pid # tuned for stock hardware with 60 degree Celsius target +pid_kp: 66.566 +pid_ki: 0.958 +pid_kd: 1155.761 +min_temp: 0 +max_temp: 110 + +# Part cooling fan +[fan] +pin: PA0 +kick_start_time: 0.5 + +# Hotend fan +# set fan runnig when extruder temperature is over 60 +[heater_fan heatbreak_fan] +pin: PC0 +heater:extruder +heater_temp: 60 +fan_speed: 0.8 + +[filament_switch_sensor filament_sensor] +pause_on_runout: true +switch_pin: ^!PC15 + +# Stock CR Touch bed sensor +[bltouch] +sensor_pin: ^PC14 +control_pin: PC13 +x_offset: -13 +y_offset: 27 +z_offset: 2.0 +speed: 10 +stow_on_each_sample: true # Occasional bed crashes when false +samples: 4 +sample_retract_dist: 2 +samples_result: average +probe_with_touch_mode: true + +[bed_mesh] +speed: 150 +mesh_min: 3,28 # need to handle head distance with bl_touch +mesh_max: 205,218 +mesh_pps: 3 +probe_count: 4,4 +fade_start: 1 +fade_end: 10 +fade_target: 0 + +[mcu] +serial: /dev/serial/by-id/usb-1a86_USB_Serial-if00-port0 +restart_method: command + +[safe_z_home] +home_xy_position: 123,83 +speed: 200 +z_hop: 10 +z_hop_speed: 10 + +# Many Ender 5 S1 printers appear to suffer from a slight twist +# in the X axis. This twist can be measured, and compensated for +# using the AXIS_TWIST_COMPENSATION_CALIBRATE G-Code command. See +# https://www.klipper3d.org/Axis_Twist_Compensation.html for more +# information. This section provides the setup for this optional +# calibration step. +[axis_twist_compensation] +calibrate_start_x: 3 +calibrate_end_x: 207 +calibrate_y: 110 + +# Probe locations for assisted bed screw adjustment. 
+[screws_tilt_adjust] +screw1: 38,6 +screw1_name: Front Left Screw +screw2: 215,6 +screw2_name: Front Right Screw +screw3: 215,175 +screw3_name: Rear Right Screw +screw4: 38,175 +screw4_name: Rear Left Screw +horizontal_move_z: 5 +speed: 100 +screw_thread: CW-M4 + +[bed_screws] +screw1: 25,25 +screw1_name: Front Left Screw +screw2: 195,25 +screw2_name: Front Right Screw +screw3: 195,195 +screw3_name: Rear Right Screw +screw4: 25,195 +screw4_name: Rear Left Screw + +[printer] +kinematics: cartesian +max_velocity: 300 +max_accel: 5000 +max_z_velocity: 5 +max_z_accel: 100 +square_corner_velocity: 5.0 diff --git a/test/klippy/printers.test b/test/klippy/printers.test index 8a876f002..94fe92c27 100644 --- a/test/klippy/printers.test +++ b/test/klippy/printers.test @@ -205,6 +205,7 @@ CONFIG ../../config/printer-voxelab-aquila-2021.cfg DICTIONARY stm32f401.dict CONFIG ../../config/generic-fysetc-cheetah-v2.0.cfg CONFIG ../../config/printer-artillery-sidewinder-x2-2022.cfg +CONFIG ../../config/printer-creality-ender5-s1-2023.cfg CONFIG ../../config/printer-elegoo-neptune3-pro-2023.cfg # Printers using the stm32f405 From d633ef2cfc6cd48e55f4c1ab5ae058d8adc5b970 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Thu, 18 Jan 2024 11:24:07 -0500 Subject: [PATCH 35/63] force_move: Fix missing call to note_kinematic_activity() Commit 3d3b87f9 incorrectly removed the call to note_kinematic_activity(). A call to toolhead.dwell() is not sufficient to wake up the mcu move queue flushing. The call to note_kinematic_activity() is needed for that. Signed-off-by: Kevin O'Connor --- klippy/extras/force_move.py | 1 + 1 file changed, 1 insertion(+) diff --git a/klippy/extras/force_move.py b/klippy/extras/force_move.py index 9cf495191..5f47f8e2e 100644 --- a/klippy/extras/force_move.py +++ b/klippy/extras/force_move.py @@ -90,6 +90,7 @@ def manual_move(self, stepper, dist, speed, accel=0.): print_time + 99999.9) stepper.set_trapq(prev_trapq) stepper.set_stepper_kinematics(prev_sk) + toolhead.note_kinematic_activity(print_time) toolhead.dwell(accel_t + cruise_t + accel_t) toolhead.flush_step_generation() def _lookup_stepper(self, gcmd): From 6cc409f6fb9f62226c56adcf80be32a0d2601ab1 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Thu, 18 Jan 2024 12:16:47 -0500 Subject: [PATCH 36/63] toolhead: Rename MoveQueue class to LookAheadQueue Rename this class so that is is not confused with the mcu "move queue". Signed-off-by: Kevin O'Connor --- docs/Code_Overview.md | 36 +++++++++++++++++++----------------- klippy/toolhead.py | 28 ++++++++++++++-------------- 2 files changed, 33 insertions(+), 31 deletions(-) diff --git a/docs/Code_Overview.md b/docs/Code_Overview.md index 5b3e07e11..0e4836acf 100644 --- a/docs/Code_Overview.md +++ b/docs/Code_Overview.md @@ -136,8 +136,9 @@ provides further information on the mechanics of moves. * The ToolHead class (in toolhead.py) handles "look-ahead" and tracks the timing of printing actions. The main codepath for a move is: - `ToolHead.move() -> MoveQueue.add_move() -> MoveQueue.flush() -> - Move.set_junction() -> ToolHead._process_moves()`. + `ToolHead.move() -> LookAheadQueue.add_move() -> + LookAheadQueue.flush() -> Move.set_junction() -> + ToolHead._process_moves()`. * ToolHead.move() creates a Move() object with the parameters of the move (in cartesian space and in units of seconds and millimeters). * The kinematics class is given the opportunity to audit each move @@ -146,10 +147,10 @@ provides further information on the mechanics of moves. 
may raise an error if the move is not valid. If check_move() completes successfully then the underlying kinematics must be able to handle the move. - * MoveQueue.add_move() places the move object on the "look-ahead" - queue. - * MoveQueue.flush() determines the start and end velocities of each - move. + * LookAheadQueue.add_move() places the move object on the + "look-ahead" queue. + * LookAheadQueue.flush() determines the start and end velocities of + each move. * Move.set_junction() implements the "trapezoid generator" on a move. The "trapezoid generator" breaks every move into three parts: a constant acceleration phase, followed by a constant velocity @@ -170,17 +171,18 @@ provides further information on the mechanics of moves. placed on a "trapezoid motion queue": `ToolHead._process_moves() -> trapq_append()` (in klippy/chelper/trapq.c). The step times are then generated: `ToolHead._process_moves() -> - ToolHead._update_move_time() -> MCU_Stepper.generate_steps() -> - itersolve_generate_steps() -> itersolve_gen_steps_range()` (in - klippy/chelper/itersolve.c). The goal of the iterative solver is to - find step times given a function that calculates a stepper position - from a time. This is done by repeatedly "guessing" various times - until the stepper position formula returns the desired position of - the next step on the stepper. The feedback produced from each guess - is used to improve future guesses so that the process rapidly - converges to the desired time. The kinematic stepper position - formulas are located in the klippy/chelper/ directory (eg, - kin_cart.c, kin_corexy.c, kin_delta.c, kin_extruder.c). + ToolHead._advance_move_time() -> ToolHead._advance_flush_time() -> + MCU_Stepper.generate_steps() -> itersolve_generate_steps() -> + itersolve_gen_steps_range()` (in klippy/chelper/itersolve.c). The + goal of the iterative solver is to find step times given a function + that calculates a stepper position from a time. This is done by + repeatedly "guessing" various times until the stepper position + formula returns the desired position of the next step on the + stepper. The feedback produced from each guess is used to improve + future guesses so that the process rapidly converges to the desired + time. The kinematic stepper position formulas are located in the + klippy/chelper/ directory (eg, kin_cart.c, kin_corexy.c, + kin_delta.c, kin_extruder.c). * Note that the extruder is handled in its own kinematic class: `ToolHead._process_moves() -> PrinterExtruder.move()`. Since diff --git a/klippy/toolhead.py b/klippy/toolhead.py index 0d609a4a5..64189ca21 100644 --- a/klippy/toolhead.py +++ b/klippy/toolhead.py @@ -110,7 +110,7 @@ def set_junction(self, start_v2, cruise_v2, end_v2): # Class to track a list of pending move requests and to facilitate # "look-ahead" across moves to reduce acceleration between moves. -class MoveQueue: +class LookAheadQueue: def __init__(self, toolhead): self.toolhead = toolhead self.queue = [] @@ -211,8 +211,8 @@ def __init__(self, config): self.all_mcus = [ m for n, m in self.printer.lookup_objects(module='mcu')] self.mcu = self.all_mcus[0] - self.move_queue = MoveQueue(self) - self.move_queue.set_flush_time(BUFFER_TIME_HIGH) + self.lookahead = LookAheadQueue(self) + self.lookahead.set_flush_time(BUFFER_TIME_HIGH) self.commanded_pos = [0., 0., 0., 0.] # Velocity and acceleration control self.max_velocity = config.getfloat('max_velocity', above=0.) 
@@ -354,10 +354,10 @@ def _process_moves(self, moves): self._advance_move_time(next_move_time) def _flush_lookahead(self): # Transit from "NeedPrime"/"Priming"/"Drip"/main state to "NeedPrime" - self.move_queue.flush() + self.lookahead.flush() self.special_queuing_state = "NeedPrime" self.need_check_pause = -1. - self.move_queue.set_flush_time(BUFFER_TIME_HIGH) + self.lookahead.set_flush_time(BUFFER_TIME_HIGH) self.check_stall_time = 0. def flush_step_generation(self): self._flush_lookahead() @@ -368,7 +368,7 @@ def get_last_move_time(self): self._flush_lookahead() self._calc_print_time() else: - self.move_queue.flush() + self.lookahead.flush() return self.print_time def _check_pause(self): eventtime = self.reactor.monotonic() @@ -462,7 +462,7 @@ def move(self, newpos, speed): if move.axes_d[3]: self.extruder.check_move(move) self.commanded_pos[:] = move.end_pos - self.move_queue.add_move(move) + self.lookahead.add_move(move) if self.print_time > self.need_check_pause: self._check_pause() def manual_move(self, coord, speed): @@ -509,12 +509,12 @@ def _update_drip_move_time(self, next_print_time): def drip_move(self, newpos, speed, drip_completion): self.dwell(self.kin_flush_delay) # Transition from "NeedPrime"/"Priming"/main state to "Drip" state - self.move_queue.flush() + self.lookahead.flush() self.special_queuing_state = "Drip" self.need_check_pause = self.reactor.NEVER self.reactor.update_timer(self.flush_timer, self.reactor.NEVER) self.do_kick_flush_timer = False - self.move_queue.set_flush_time(BUFFER_TIME_HIGH) + self.lookahead.set_flush_time(BUFFER_TIME_HIGH) self.check_stall_time = 0. self.drip_completion = drip_completion # Submit move @@ -526,9 +526,9 @@ def drip_move(self, newpos, speed, drip_completion): raise # Transmit move in "drip" mode try: - self.move_queue.flush() + self.lookahead.flush() except DripModeEndSignal as e: - self.move_queue.reset() + self.lookahead.reset() self.trapq_finalize_moves(self.trapq, self.reactor.NEVER, 0) # Exit "Drip" state self.reactor.update_timer(self.flush_timer, self.reactor.NOW) @@ -548,7 +548,7 @@ def stats(self, eventtime): self.print_time, max(buffer_time, 0.), self.print_stall) def check_busy(self, eventtime): est_print_time = self.mcu.estimated_print_time(eventtime) - lookahead_empty = not self.move_queue.queue + lookahead_empty = not self.lookahead.queue return self.print_time, est_print_time, lookahead_empty def get_status(self, eventtime): print_time = self.print_time @@ -566,7 +566,7 @@ def get_status(self, eventtime): return res def _handle_shutdown(self): self.can_pause = False - self.move_queue.reset() + self.lookahead.reset() def get_kinematics(self): return self.kin def get_trapq(self): @@ -583,7 +583,7 @@ def note_step_generation_scan_time(self, delay, old_delay=0.): new_delay = max(self.kin_flush_times + [SDS_CHECK_TIME]) self.kin_flush_delay = new_delay def register_lookahead_callback(self, callback): - last_move = self.move_queue.get_last() + last_move = self.lookahead.get_last() if last_move is None: callback(self.get_last_move_time()) return From 1d92be71da90b7d1e7fba0392fd23b506e093bf6 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Thu, 18 Jan 2024 12:20:44 -0500 Subject: [PATCH 37/63] toolhead: Rename note_kinematic_activity() to note_mcu_movequeue_activity() Rename this function to make it more clear why it is called. 
Signed-off-by: Kevin O'Connor --- klippy/extras/force_move.py | 2 +- klippy/extras/manual_stepper.py | 2 +- klippy/extras/pwm_tool.py | 2 +- klippy/toolhead.py | 14 +++++++------- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/klippy/extras/force_move.py b/klippy/extras/force_move.py index 5f47f8e2e..50b801412 100644 --- a/klippy/extras/force_move.py +++ b/klippy/extras/force_move.py @@ -90,7 +90,7 @@ def manual_move(self, stepper, dist, speed, accel=0.): print_time + 99999.9) stepper.set_trapq(prev_trapq) stepper.set_stepper_kinematics(prev_sk) - toolhead.note_kinematic_activity(print_time) + toolhead.note_mcu_movequeue_activity(print_time) toolhead.dwell(accel_t + cruise_t + accel_t) toolhead.flush_step_generation() def _lookup_stepper(self, gcmd): diff --git a/klippy/extras/manual_stepper.py b/klippy/extras/manual_stepper.py index 9f61e0298..40db4a503 100644 --- a/klippy/extras/manual_stepper.py +++ b/klippy/extras/manual_stepper.py @@ -70,7 +70,7 @@ def do_move(self, movepos, speed, accel, sync=True): self.trapq_finalize_moves(self.trapq, self.next_cmd_time + 99999.9, self.next_cmd_time + 99999.9) toolhead = self.printer.lookup_object('toolhead') - toolhead.note_kinematic_activity(self.next_cmd_time) + toolhead.note_mcu_movequeue_activity(self.next_cmd_time) if sync: self.sync_print_time() def do_homing_move(self, movepos, speed, accel, triggered, check_trigger): diff --git a/klippy/extras/pwm_tool.py b/klippy/extras/pwm_tool.py index 704266a85..aa95ecbef 100644 --- a/klippy/extras/pwm_tool.py +++ b/klippy/extras/pwm_tool.py @@ -117,7 +117,7 @@ def _send_update(self, clock, val): # Continue flushing to resend time wakeclock += self._duration_ticks wake_print_time = self._mcu.clock_to_print_time(wakeclock) - self._toolhead.note_kinematic_activity(wake_print_time) + self._toolhead.note_mcu_movequeue_activity(wake_print_time) def set_pwm(self, print_time, value): clock = self._mcu.print_time_to_clock(print_time) if self._invert: diff --git a/klippy/toolhead.py b/klippy/toolhead.py index 64189ca21..ce014365c 100644 --- a/klippy/toolhead.py +++ b/klippy/toolhead.py @@ -349,8 +349,8 @@ def _process_moves(self, moves): # Generate steps for moves if self.special_queuing_state: self._update_drip_move_time(next_move_time) - self.note_kinematic_activity(next_move_time + self.kin_flush_delay, - set_step_gen_time=True) + self.note_mcu_movequeue_activity(next_move_time + self.kin_flush_delay, + set_step_gen_time=True) self._advance_move_time(next_move_time) def _flush_lookahead(self): # Transit from "NeedPrime"/"Priming"/"Drip"/main state to "NeedPrime" @@ -503,8 +503,8 @@ def _update_drip_move_time(self, next_print_time): self.drip_completion.wait(curtime + wait_time) continue npt = min(self.print_time + DRIP_SEGMENT_TIME, next_print_time) - self.note_kinematic_activity(npt + self.kin_flush_delay, - set_step_gen_time=True) + self.note_mcu_movequeue_activity(npt + self.kin_flush_delay, + set_step_gen_time=True) self._advance_move_time(npt) def drip_move(self, newpos, speed, drip_completion): self.dwell(self.kin_flush_delay) @@ -588,10 +588,10 @@ def register_lookahead_callback(self, callback): callback(self.get_last_move_time()) return last_move.timing_callbacks.append(callback) - def note_kinematic_activity(self, kin_time, set_step_gen_time=False): - self.need_flush_time = max(self.need_flush_time, kin_time) + def note_mcu_movequeue_activity(self, mq_time, set_step_gen_time=False): + self.need_flush_time = max(self.need_flush_time, mq_time) if set_step_gen_time: - 
self.step_gen_time = max(self.step_gen_time, kin_time)
+            self.step_gen_time = max(self.step_gen_time, mq_time)
         if self.do_kick_flush_timer:
             self.do_kick_flush_timer = False
             self.reactor.update_timer(self.flush_timer, self.reactor.NOW)

From 94719fe327874d57ee92610c90972f74170a7b40 Mon Sep 17 00:00:00 2001
From: Kevin O'Connor
Date: Thu, 18 Jan 2024 13:36:17 -0500
Subject: [PATCH 38/63] docs: Update mkdocs to use latest jinja2 version

There is a jinja2 security advisory on the current Jinja2 version.
Klipper is not impacted by this advisory (as it does not run jinja2 on
any untrusted data), but there is no harm in updating.

Signed-off-by: Kevin O'Connor
---
 docs/_klipper3d/mkdocs-requirements.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/_klipper3d/mkdocs-requirements.txt b/docs/_klipper3d/mkdocs-requirements.txt
index 9ea6d2192..739288959 100644
--- a/docs/_klipper3d/mkdocs-requirements.txt
+++ b/docs/_klipper3d/mkdocs-requirements.txt
@@ -1,6 +1,6 @@
 # Python virtualenv module requirements for mkdocs
-jinja2==3.0.3
-mkdocs==1.2.3
+jinja2==3.1.3
+mkdocs==1.2.4
 mkdocs-material==8.1.3
 mkdocs-simple-hooks==0.1.3
 mkdocs-exclude==1.0.2

From 83d0d2f19b81550e82d6f68415dfcd6df90dee69 Mon Sep 17 00:00:00 2001
From: Kevin O'Connor
Date: Sun, 17 Dec 2023 11:21:01 -0500
Subject: [PATCH 39/63] mcu: Add send_wait_ack() support to CommandWrapper

Signed-off-by: Kevin O'Connor
---
 klippy/mcu.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/klippy/mcu.py b/klippy/mcu.py
index 6318ff7f5..cfc389e76 100644
--- a/klippy/mcu.py
+++ b/klippy/mcu.py
@@ -91,6 +91,9 @@ def __init__(self, serial, msgformat, cmd_queue=None):
     def send(self, data=(), minclock=0, reqclock=0):
         cmd = self._cmd.encode(data)
         self._serial.raw_send(cmd, minclock, reqclock, self._cmd_queue)
+    def send_wait_ack(self, data=(), minclock=0, reqclock=0):
+        cmd = self._cmd.encode(data)
+        self._serial.raw_send_wait_ack(cmd, minclock, reqclock, self._cmd_queue)
     def get_command_tag(self):
         return self._msgtag

From 3275614b895c409f30cc9520c02990b18abad3c6 Mon Sep 17 00:00:00 2001
From: Kevin O'Connor
Date: Sun, 17 Dec 2023 11:30:23 -0500
Subject: [PATCH 40/63] sensor_adxl345: No need to send messages when stopping
 queries

Simplify the mcu code as any messages are ignored by the host anyway.

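For illustration, a hypothetical fragment (based on the diffs in this and the
previous patch, not runnable on its own) of the new host-side stop sequence:
a plain command that waits for the mcu's acknowledgment replaces the query
that previously forced the mcu to build a response message.

    # Old: a dedicated end query; the mcu had to send a final status response
    #   params = self.query_adxl345_end_cmd.send([self.oid, 0, 0])
    # New: reuse the start command with rest_ticks=0 and wait for the ack
    self.query_adxl345_cmd.send_wait_ack([self.oid, 0, 0])
    self.bulk_queue.clear_samples()
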
Signed-off-by: Kevin O'Connor --- klippy/extras/adxl345.py | 8 ++------ src/sensor_adxl345.c | 18 ------------------ 2 files changed, 2 insertions(+), 24 deletions(-) diff --git a/klippy/extras/adxl345.py b/klippy/extras/adxl345.py index 8f40c7fec..76fd4ca45 100644 --- a/klippy/extras/adxl345.py +++ b/klippy/extras/adxl345.py @@ -204,7 +204,7 @@ def __init__(self, config): self.spi = bus.MCU_SPI_from_config(config, 3, default_speed=5000000) self.mcu = mcu = self.spi.get_mcu() self.oid = oid = mcu.create_oid() - self.query_adxl345_cmd = self.query_adxl345_end_cmd = None + self.query_adxl345_cmd = None self.query_adxl345_status_cmd = None mcu.add_config_cmd("config_adxl345 oid=%d spi_oid=%d" % (oid, self.spi.get_oid())) @@ -230,10 +230,6 @@ def _build_config(self): cmdqueue = self.spi.get_command_queue() self.query_adxl345_cmd = self.mcu.lookup_command( "query_adxl345 oid=%c clock=%u rest_ticks=%u", cq=cmdqueue) - self.query_adxl345_end_cmd = self.mcu.lookup_query_command( - "query_adxl345 oid=%c clock=%u rest_ticks=%u", - "adxl345_status oid=%c clock=%u query_ticks=%u next_sequence=%hu" - " buffered=%c fifo=%c limit_count=%hu", oid=self.oid, cq=cmdqueue) self.query_adxl345_status_cmd = self.mcu.lookup_query_command( "query_adxl345_status oid=%c", "adxl345_status oid=%c clock=%u query_ticks=%u next_sequence=%hu" @@ -334,7 +330,7 @@ def _start_measurements(self): self.last_error_count = 0 def _finish_measurements(self): # Halt bulk reading - params = self.query_adxl345_end_cmd.send([self.oid, 0, 0]) + self.query_adxl345_cmd.send_wait_ack([self.oid, 0, 0]) self.bulk_queue.clear_samples() logging.info("ADXL345 finished '%s' measurements", self.name) def _process_batch(self, eventtime): diff --git a/src/sensor_adxl345.c b/src/sensor_adxl345.c index 3d80059d0..5ec3945e8 100644 --- a/src/sensor_adxl345.c +++ b/src/sensor_adxl345.c @@ -148,25 +148,7 @@ adxl_stop(struct adxl345 *ax, uint8_t oid) sched_del_timer(&ax->timer); ax->flags = 0; uint8_t msg[2] = { AR_POWER_CTL, 0x00 }; - uint32_t end1_time = timer_read_time(); spidev_transfer(ax->spi, 0, sizeof(msg), msg); - uint32_t end2_time = timer_read_time(); - // Drain any measurements still in fifo - uint_fast8_t i; - for (i=0; i<33; i++) { - msg[0] = AR_FIFO_STATUS | AM_READ; - msg[1] = 0x00; - spidev_transfer(ax->spi, 1, sizeof(msg), msg); - uint_fast8_t fifo_status = msg[1] & ~0x80; - if (!fifo_status) - break; - if (fifo_status <= 32) - adxl_query(ax, oid); - } - // Report final data - if (ax->data_count) - adxl_report(ax, oid); - adxl_status(ax, oid, end1_time, end2_time, msg[1]); } void From 5ff555a705b8d3a586f240eb7456b591ee928ac2 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sun, 17 Dec 2023 11:31:58 -0500 Subject: [PATCH 41/63] sensor_mpu9250: No need to send messages when stopping queries Simplify the mcu code as any messages are ignored by the host anyway. 
Signed-off-by: Kevin O'Connor --- klippy/extras/mpu9250.py | 8 ++------ src/sensor_mpu9250.c | 9 --------- 2 files changed, 2 insertions(+), 15 deletions(-) diff --git a/klippy/extras/mpu9250.py b/klippy/extras/mpu9250.py index 04a33eb24..41376dc35 100644 --- a/klippy/extras/mpu9250.py +++ b/klippy/extras/mpu9250.py @@ -69,7 +69,7 @@ def __init__(self, config): default_speed=400000) self.mcu = mcu = self.i2c.get_mcu() self.oid = oid = mcu.create_oid() - self.query_mpu9250_cmd = self.query_mpu9250_end_cmd = None + self.query_mpu9250_cmd = None self.query_mpu9250_status_cmd = None mcu.register_config_callback(self._build_config) self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "mpu9250_data", oid) @@ -95,10 +95,6 @@ def _build_config(self): % (self.oid,), on_restart=True) self.query_mpu9250_cmd = self.mcu.lookup_command( "query_mpu9250 oid=%c clock=%u rest_ticks=%u", cq=cmdqueue) - self.query_mpu9250_end_cmd = self.mcu.lookup_query_command( - "query_mpu9250 oid=%c clock=%u rest_ticks=%u", - "mpu9250_status oid=%c clock=%u query_ticks=%u next_sequence=%hu" - " buffered=%c fifo=%u limit_count=%hu", oid=self.oid, cq=cmdqueue) self.query_mpu9250_status_cmd = self.mcu.lookup_query_command( "query_mpu9250_status oid=%c", "mpu9250_status oid=%c clock=%u query_ticks=%u next_sequence=%hu" @@ -193,7 +189,7 @@ def _start_measurements(self): self.last_error_count = 0 def _finish_measurements(self): # Halt bulk reading - params = self.query_mpu9250_end_cmd.send([self.oid, 0, 0]) + self.query_mpu9250_cmd.send_wait_ack([self.oid, 0, 0]) self.bulk_queue.clear_samples() logging.info("MPU9250 finished '%s' measurements", self.name) self.set_reg(REG_PWR_MGMT_1, SET_PWR_MGMT_1_SLEEP) diff --git a/src/sensor_mpu9250.c b/src/sensor_mpu9250.c index c535d0971..7792b4d80 100644 --- a/src/sensor_mpu9250.c +++ b/src/sensor_mpu9250.c @@ -192,16 +192,7 @@ mp9250_stop(struct mpu9250 *mp, uint8_t oid) // disable accel FIFO uint8_t msg[2] = { AR_FIFO_EN, SET_DISABLE_FIFO }; - uint32_t end1_time = timer_read_time(); i2c_write(mp->i2c->i2c_config, sizeof(msg), msg); - uint32_t end2_time = timer_read_time(); - - // Report final data - if (mp->data_count > 0) - mp9250_report(mp, oid); - uint16_t bytes_to_read = get_fifo_status(mp); - mp9250_status(mp, oid, end1_time, end2_time, - bytes_to_read / BYTES_PER_FIFO_ENTRY); // Uncomment and rebuilt to check for FIFO overruns when tuning //output("mpu9240 limit_count=%u fifo_max=%u", From 95e1a290f125650f5131a960d685d53a1e2f5e4f Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sun, 17 Dec 2023 11:33:11 -0500 Subject: [PATCH 42/63] sensor_lis2dw: No need to send messages when stopping queries Simplify the mcu code as any messages are ignored by the host anyway. 
Signed-off-by: Kevin O'Connor --- klippy/extras/lis2dw.py | 8 ++------ src/sensor_lis2dw.c | 21 ++------------------- 2 files changed, 4 insertions(+), 25 deletions(-) diff --git a/klippy/extras/lis2dw.py b/klippy/extras/lis2dw.py index 28591c21b..a7fe54d74 100644 --- a/klippy/extras/lis2dw.py +++ b/klippy/extras/lis2dw.py @@ -48,7 +48,7 @@ def __init__(self, config): self.spi = bus.MCU_SPI_from_config(config, 3, default_speed=5000000) self.mcu = mcu = self.spi.get_mcu() self.oid = oid = mcu.create_oid() - self.query_lis2dw_cmd = self.query_lis2dw_end_cmd = None + self.query_lis2dw_cmd = None self.query_lis2dw_status_cmd = None mcu.add_config_cmd("config_lis2dw oid=%d spi_oid=%d" % (oid, self.spi.get_oid())) @@ -75,10 +75,6 @@ def _build_config(self): cmdqueue = self.spi.get_command_queue() self.query_lis2dw_cmd = self.mcu.lookup_command( "query_lis2dw oid=%c clock=%u rest_ticks=%u", cq=cmdqueue) - self.query_lis2dw_end_cmd = self.mcu.lookup_query_command( - "query_lis2dw oid=%c clock=%u rest_ticks=%u", - "lis2dw_status oid=%c clock=%u query_ticks=%u next_sequence=%hu" - " buffered=%c fifo=%c limit_count=%hu", oid=self.oid, cq=cmdqueue) self.query_lis2dw_status_cmd = self.mcu.lookup_query_command( "query_lis2dw_status oid=%c", "lis2dw_status oid=%c clock=%u query_ticks=%u next_sequence=%hu" @@ -179,7 +175,7 @@ def _start_measurements(self): self.last_error_count = 0 def _finish_measurements(self): # Halt bulk reading - params = self.query_lis2dw_end_cmd.send([self.oid, 0, 0]) + self.query_lis2dw_cmd.send_wait_ack([self.oid, 0, 0]) self.bulk_queue.clear_samples() logging.info("LIS2DW finished '%s' measurements", self.name) self.set_reg(REG_LIS2DW_FIFO_CTRL, 0x00) diff --git a/src/sensor_lis2dw.c b/src/sensor_lis2dw.c index 52612623f..06dd32068 100644 --- a/src/sensor_lis2dw.c +++ b/src/sensor_lis2dw.c @@ -23,7 +23,7 @@ struct lis2dw { uint32_t rest_ticks; struct spidev_s *spi; uint16_t sequence, limit_count; - uint8_t flags, data_count, fifo_disable; + uint8_t flags, data_count; uint8_t data[48]; }; @@ -117,7 +117,7 @@ lis2dw_query(struct lis2dw *ax, uint8_t oid) ax->limit_count++; // check if we need to run the task again (more packets in fifo?) 
- if (!fifo_empty&&!(ax->fifo_disable)) { + if (!fifo_empty) { // More data in fifo - wake this task again sched_wake_task(&lis2dw_wake); } else if (ax->flags & LIS_RUNNING) { @@ -134,7 +134,6 @@ lis2dw_start(struct lis2dw *ax, uint8_t oid) { sched_del_timer(&ax->timer); ax->flags = LIS_RUNNING; - ax->fifo_disable = 0; uint8_t ctrl[2] = {LIS_FIFO_CTRL , 0xC0}; spidev_transfer(ax->spi, 0, sizeof(ctrl), ctrl); lis2dw_reschedule_timer(ax); @@ -147,23 +146,8 @@ lis2dw_stop(struct lis2dw *ax, uint8_t oid) // Disable measurements sched_del_timer(&ax->timer); ax->flags = 0; - // Drain any measurements still in fifo - ax->fifo_disable = 1; - lis2dw_query(ax, oid); - uint8_t ctrl[2] = {LIS_FIFO_CTRL , 0}; - uint32_t end1_time = timer_read_time(); spidev_transfer(ax->spi, 0, sizeof(ctrl), ctrl); - uint32_t end2_time = timer_read_time(); - - uint8_t msg[2] = { LIS_FIFO_SAMPLES | LIS_AM_READ , 0}; - spidev_transfer(ax->spi, 1, sizeof(msg), msg); - uint8_t fifo_status = msg[1]&0x1f; - - //Report final data - if (ax->data_count) - lis2dw_report(ax, oid); - lis2dw_status(ax, oid, end1_time, end2_time, fifo_status); } void @@ -183,7 +167,6 @@ command_query_lis2dw(uint32_t *args) ax->flags = LIS_HAVE_START; ax->sequence = ax->limit_count = 0; ax->data_count = 0; - ax->fifo_disable = 0; sched_add_timer(&ax->timer); } DECL_COMMAND(command_query_lis2dw, From dc6182f3b339b990c8a68940f02a210e332be269 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sun, 17 Dec 2023 11:35:40 -0500 Subject: [PATCH 43/63] sensor_angle: No need to send messages when stopping queries Simplify the mcu code as any messages are ignored by the host anyway. Signed-off-by: Kevin O'Connor --- klippy/extras/angle.py | 7 ++----- src/sensor_angle.c | 7 ++----- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/klippy/extras/angle.py b/klippy/extras/angle.py index b1aa0d967..163168d0b 100644 --- a/klippy/extras/angle.py +++ b/klippy/extras/angle.py @@ -437,7 +437,7 @@ def __init__(self, config): self.oid = oid = mcu.create_oid() self.sensor_helper = sensor_class(config, self.spi, oid) # Setup mcu sensor_spi_angle bulk query code - self.query_spi_angle_cmd = self.query_spi_angle_end_cmd = None + self.query_spi_angle_cmd = None mcu.add_config_cmd( "config_spi_angle oid=%d spi_oid=%d spi_angle_type=%s" % (oid, self.spi.get_oid(), sensor_type)) @@ -462,9 +462,6 @@ def _build_config(self): self.query_spi_angle_cmd = self.mcu.lookup_command( "query_spi_angle oid=%c clock=%u rest_ticks=%u time_shift=%c", cq=cmdqueue) - self.query_spi_angle_end_cmd = self.mcu.lookup_query_command( - "query_spi_angle oid=%c clock=%u rest_ticks=%u time_shift=%c", - "spi_angle_end oid=%c sequence=%hu", oid=self.oid, cq=cmdqueue) def get_status(self, eventtime=None): return {'temperature': self.sensor_helper.last_temperature} def add_client(self, client_cb): @@ -543,7 +540,7 @@ def _start_measurements(self): self.time_shift], reqclock=reqclock) def _finish_measurements(self): # Halt bulk reading - params = self.query_spi_angle_end_cmd.send([self.oid, 0, 0, 0]) + self.query_spi_angle_cmd.send_wait_ack([self.oid, 0, 0, 0]) self.bulk_queue.clear_samples() self.sensor_helper.last_temperature = None logging.info("Stopped angle '%s' measurements", self.name) diff --git a/src/sensor_angle.c b/src/sensor_angle.c index 4d35aadf1..865670b89 100644 --- a/src/sensor_angle.c +++ b/src/sensor_angle.c @@ -230,13 +230,10 @@ command_query_spi_angle(uint32_t *args) sched_del_timer(&sa->timer); sa->flags = 0; - if (!args[2]) { + if (!args[2]) // End measurements - if 
(sa->data_count) - angle_report(sa, oid); - sendf("spi_angle_end oid=%c sequence=%hu", oid, sa->sequence); return; - } + // Start new measurements query sa->timer.waketime = args[1]; sa->rest_ticks = args[2]; From 266e96621c0133e1192bbaec5addb6bcf443a203 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sun, 17 Dec 2023 17:59:25 -0500 Subject: [PATCH 44/63] sensor_bulk: New C file with helper code for sending bulk sensor measurements Refactor the low-level "bulk sensor" management code in the mcu. This updates the sensor_adxl345.c, sensor_mpu9250.c, sensor_lis2dw.c, and sensor_angle.c code to use the same "bulk sensor" messages. All of these sensors will now send "sensor_bulk_data" and "sensor_bulk_status" messages. Signed-off-by: Kevin O'Connor --- klippy/extras/adxl345.py | 28 +++++--------------- klippy/extras/angle.py | 4 +-- klippy/extras/bulk_sensor.py | 34 ++++++++++++++---------- klippy/extras/lis2dw.py | 21 +++++---------- klippy/extras/mpu9250.py | 22 +++++----------- src/Kconfig | 4 +++ src/Makefile | 3 ++- src/sensor_adxl345.c | 51 ++++++++++++++---------------------- src/sensor_angle.c | 33 +++++++++-------------- src/sensor_bulk.c | 38 +++++++++++++++++++++++++++ src/sensor_bulk.h | 15 +++++++++++ src/sensor_lis2dw.c | 45 +++++++++---------------------- src/sensor_mpu9250.c | 44 +++++++------------------------ 13 files changed, 153 insertions(+), 189 deletions(-) create mode 100644 src/sensor_bulk.c create mode 100644 src/sensor_bulk.h diff --git a/klippy/extras/adxl345.py b/klippy/extras/adxl345.py index 76fd4ca45..b91224d50 100644 --- a/klippy/extras/adxl345.py +++ b/klippy/extras/adxl345.py @@ -187,7 +187,7 @@ def read_axes_map(config): MIN_MSG_TIME = 0.100 BYTES_PER_SAMPLE = 5 -SAMPLES_PER_BLOCK = 10 +SAMPLES_PER_BLOCK = bulk_sensor.MAX_BULK_MSG_SIZE // BYTES_PER_SAMPLE BATCH_UPDATES = 0.100 @@ -205,13 +205,12 @@ def __init__(self, config): self.mcu = mcu = self.spi.get_mcu() self.oid = oid = mcu.create_oid() self.query_adxl345_cmd = None - self.query_adxl345_status_cmd = None mcu.add_config_cmd("config_adxl345 oid=%d spi_oid=%d" % (oid, self.spi.get_oid())) mcu.add_config_cmd("query_adxl345 oid=%d clock=0 rest_ticks=0" % (oid,), on_restart=True) mcu.register_config_callback(self._build_config) - self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "adxl345_data", oid) + self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, oid=oid) # Clock tracking chip_smooth = self.data_rate * BATCH_UPDATES * 2 self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth) @@ -230,10 +229,8 @@ def _build_config(self): cmdqueue = self.spi.get_command_queue() self.query_adxl345_cmd = self.mcu.lookup_command( "query_adxl345 oid=%c clock=%u rest_ticks=%u", cq=cmdqueue) - self.query_adxl345_status_cmd = self.mcu.lookup_query_command( - "query_adxl345_status oid=%c", - "adxl345_status oid=%c clock=%u query_ticks=%u next_sequence=%hu" - " buffered=%c fifo=%c limit_count=%hu", oid=self.oid, cq=cmdqueue) + self.clock_updater.setup_query_command( + self.mcu, "query_adxl345_status oid=%c", oid=self.oid, cq=cmdqueue) def read_reg(self, reg): params = self.spi.spi_transfer([reg | REG_MOD_READ, 0x00]) response = bytearray(params['response']) @@ -286,17 +283,6 @@ def _extract_samples(self, raw_samples): self.clock_sync.set_last_chip_clock(seq * SAMPLES_PER_BLOCK + i) del samples[count:] return samples - def _update_clock(self, minclock=0): - # Query current state - for retry in range(5): - params = self.query_adxl345_status_cmd.send([self.oid], - minclock=minclock) - fifo = params['fifo'] & 0x7f - if 
fifo <= 32: - break - else: - raise self.printer.command_error("Unable to query adxl345 fifo") - self.clock_updater.update_clock(params) # Start, stop, and process message batches def _start_measurements(self): # In case of miswiring, testing ADXL345 device ID prevents treating @@ -325,8 +311,6 @@ def _start_measurements(self): logging.info("ADXL345 starting '%s' measurements", self.name) # Initialize clock tracking self.clock_updater.note_start(reqclock) - self._update_clock(minclock=reqclock) - self.clock_updater.clear_duration_filter() self.last_error_count = 0 def _finish_measurements(self): # Halt bulk reading @@ -334,7 +318,7 @@ def _finish_measurements(self): self.bulk_queue.clear_samples() logging.info("ADXL345 finished '%s' measurements", self.name) def _process_batch(self, eventtime): - self._update_clock() + self.clock_updater.update_clock() raw_samples = self.bulk_queue.pull_samples() if not raw_samples: return {} @@ -342,7 +326,7 @@ def _process_batch(self, eventtime): if not samples: return {} return {'data': samples, 'errors': self.last_error_count, - 'overflows': self.clock_updater.get_last_limit_count()} + 'overflows': self.clock_updater.get_last_overflows()} def load_config(config): return ADXL345(config) diff --git a/klippy/extras/angle.py b/klippy/extras/angle.py index 163168d0b..23f402a7e 100644 --- a/klippy/extras/angle.py +++ b/klippy/extras/angle.py @@ -412,7 +412,7 @@ def cmd_ANGLE_DEBUG_WRITE(self, gcmd): self._write_reg(reg, val) BYTES_PER_SAMPLE = 3 -SAMPLES_PER_BLOCK = 16 +SAMPLES_PER_BLOCK = bulk_sensor.MAX_BULK_MSG_SIZE // BYTES_PER_SAMPLE SAMPLE_PERIOD = 0.000400 BATCH_UPDATES = 0.100 @@ -445,7 +445,7 @@ def __init__(self, config): "query_spi_angle oid=%d clock=0 rest_ticks=0 time_shift=0" % (oid,), on_restart=True) mcu.register_config_callback(self._build_config) - self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "spi_angle_data", oid) + self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, oid=oid) # Process messages in batches self.batch_bulk = bulk_sensor.BatchBulkHelper( self.printer, self._process_batch, diff --git a/klippy/extras/bulk_sensor.py b/klippy/extras/bulk_sensor.py index 8d0c05416..df5a5da25 100644 --- a/klippy/extras/bulk_sensor.py +++ b/klippy/extras/bulk_sensor.py @@ -114,7 +114,7 @@ def handle_batch(self, msg): # Helper class to store incoming messages in a queue class BulkDataQueue: - def __init__(self, mcu, msg_name, oid): + def __init__(self, mcu, msg_name="sensor_bulk_data", oid=None): # Measurement storage (accessed from background thread) self.lock = threading.Lock() self.raw_samples = [] @@ -206,31 +206,37 @@ def __init__(self, clock_sync, bytes_per_sample): self.clock_sync = clock_sync self.bytes_per_sample = bytes_per_sample self.samples_per_block = MAX_BULK_MSG_SIZE // bytes_per_sample - self.mcu = clock_sync.mcu self.last_sequence = self.max_query_duration = 0 - self.last_limit_count = 0 + self.last_overflows = 0 + self.mcu = self.oid = self.query_status_cmd = None + def setup_query_command(self, mcu, msgformat, oid, cq): + self.mcu = mcu + self.oid = oid + self.query_status_cmd = self.mcu.lookup_query_command( + msgformat, "sensor_bulk_status oid=%c clock=%u query_ticks=%u" + " next_sequence=%hu buffered=%u possible_overflows=%hu", + oid=oid, cq=cq) def get_last_sequence(self): return self.last_sequence - def get_last_limit_count(self): - return self.last_limit_count + def get_last_overflows(self): + return self.last_overflows def clear_duration_filter(self): self.max_query_duration = 1 << 31 def note_start(self, reqclock): 
self.last_sequence = 0 - self.last_limit_count = 0 + self.last_overflows = 0 self.clock_sync.reset(reqclock, 0) self.clear_duration_filter() - def update_clock(self, params): - # Handle a status response message of the form: - # adxl345_status oid=x clock=x query_ticks=x next_sequence=x - # buffered=x fifo=x limit_count=x - fifo = params['fifo'] + self.update_clock(minclock=reqclock) + self.clear_duration_filter() + def update_clock(self, minclock=0): + params = self.query_status_cmd.send([self.oid], minclock=minclock) mcu_clock = self.mcu.clock32_to_clock64(params['clock']) seq_diff = (params['next_sequence'] - self.last_sequence) & 0xffff self.last_sequence += seq_diff buffered = params['buffered'] - lc_diff = (params['limit_count'] - self.last_limit_count) & 0xffff - self.last_limit_count += lc_diff + po_diff = (params['possible_overflows'] - self.last_overflows) & 0xffff + self.last_overflows += po_diff duration = params['query_ticks'] if duration > self.max_query_duration: # Skip measurement as a high query time could skew clock tracking @@ -239,7 +245,7 @@ def update_clock(self, params): return self.max_query_duration = 2 * duration msg_count = (self.last_sequence * self.samples_per_block - + buffered // self.bytes_per_sample + fifo) + + buffered // self.bytes_per_sample) # The "chip clock" is the message counter plus .5 for average # inaccuracy of query responses and plus .5 for assumed offset # of hardware processing time. diff --git a/klippy/extras/lis2dw.py b/klippy/extras/lis2dw.py index a7fe54d74..74911e6fb 100644 --- a/klippy/extras/lis2dw.py +++ b/klippy/extras/lis2dw.py @@ -33,7 +33,7 @@ MIN_MSG_TIME = 0.100 BYTES_PER_SAMPLE = 6 -SAMPLES_PER_BLOCK = 8 +SAMPLES_PER_BLOCK = bulk_sensor.MAX_BULK_MSG_SIZE // BYTES_PER_SAMPLE BATCH_UPDATES = 0.100 @@ -49,13 +49,12 @@ def __init__(self, config): self.mcu = mcu = self.spi.get_mcu() self.oid = oid = mcu.create_oid() self.query_lis2dw_cmd = None - self.query_lis2dw_status_cmd = None mcu.add_config_cmd("config_lis2dw oid=%d spi_oid=%d" % (oid, self.spi.get_oid())) mcu.add_config_cmd("query_lis2dw oid=%d clock=0 rest_ticks=0" % (oid,), on_restart=True) mcu.register_config_callback(self._build_config) - self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "lis2dw_data", oid) + self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, oid=oid) # Clock tracking chip_smooth = self.data_rate * BATCH_UPDATES * 2 self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth) @@ -75,10 +74,8 @@ def _build_config(self): cmdqueue = self.spi.get_command_queue() self.query_lis2dw_cmd = self.mcu.lookup_command( "query_lis2dw oid=%c clock=%u rest_ticks=%u", cq=cmdqueue) - self.query_lis2dw_status_cmd = self.mcu.lookup_query_command( - "query_lis2dw_status oid=%c", - "lis2dw_status oid=%c clock=%u query_ticks=%u next_sequence=%hu" - " buffered=%c fifo=%c limit_count=%hu", oid=self.oid, cq=cmdqueue) + self.clock_updater.setup_query_command( + self.mcu, "query_lis2dw_status oid=%c", oid=self.oid, cq=cmdqueue) def read_reg(self, reg): params = self.spi.spi_transfer([reg | REG_MOD_READ, 0x00]) response = bytearray(params['response']) @@ -133,10 +130,6 @@ def _extract_samples(self, raw_samples): self.clock_sync.set_last_chip_clock(seq * SAMPLES_PER_BLOCK + i) del samples[count:] return samples - def _update_clock(self, minclock=0): - params = self.query_lis2dw_status_cmd.send([self.oid], - minclock=minclock) - self.clock_updater.update_clock(params) # Start, stop, and process message batches def _start_measurements(self): # In case of miswiring, testing LIS2DW 
device ID prevents treating @@ -170,8 +163,6 @@ def _start_measurements(self): logging.info("LIS2DW starting '%s' measurements", self.name) # Initialize clock tracking self.clock_updater.note_start(reqclock) - self._update_clock(minclock=reqclock) - self.clock_updater.clear_duration_filter() self.last_error_count = 0 def _finish_measurements(self): # Halt bulk reading @@ -180,7 +171,7 @@ def _finish_measurements(self): logging.info("LIS2DW finished '%s' measurements", self.name) self.set_reg(REG_LIS2DW_FIFO_CTRL, 0x00) def _process_batch(self, eventtime): - self._update_clock() + self.clock_updater.update_clock() raw_samples = self.bulk_queue.pull_samples() if not raw_samples: return {} @@ -188,7 +179,7 @@ def _process_batch(self, eventtime): if not samples: return {} return {'data': samples, 'errors': self.last_error_count, - 'overflows': self.clock_updater.get_last_limit_count()} + 'overflows': self.clock_updater.get_last_overflows()} def load_config(config): return LIS2DW(config) diff --git a/klippy/extras/mpu9250.py b/klippy/extras/mpu9250.py index 41376dc35..4626e1c0c 100644 --- a/klippy/extras/mpu9250.py +++ b/klippy/extras/mpu9250.py @@ -50,7 +50,7 @@ MIN_MSG_TIME = 0.100 BYTES_PER_SAMPLE = 6 -SAMPLES_PER_BLOCK = 8 +SAMPLES_PER_BLOCK = bulk_sensor.MAX_BULK_MSG_SIZE // BYTES_PER_SAMPLE BATCH_UPDATES = 0.100 @@ -70,9 +70,8 @@ def __init__(self, config): self.mcu = mcu = self.i2c.get_mcu() self.oid = oid = mcu.create_oid() self.query_mpu9250_cmd = None - self.query_mpu9250_status_cmd = None mcu.register_config_callback(self._build_config) - self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "mpu9250_data", oid) + self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, oid=oid) # Clock tracking chip_smooth = self.data_rate * BATCH_UPDATES * 2 self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth) @@ -95,10 +94,8 @@ def _build_config(self): % (self.oid,), on_restart=True) self.query_mpu9250_cmd = self.mcu.lookup_command( "query_mpu9250 oid=%c clock=%u rest_ticks=%u", cq=cmdqueue) - self.query_mpu9250_status_cmd = self.mcu.lookup_query_command( - "query_mpu9250_status oid=%c", - "mpu9250_status oid=%c clock=%u query_ticks=%u next_sequence=%hu" - " buffered=%c fifo=%u limit_count=%hu", oid=self.oid, cq=cmdqueue) + self.clock_updater.setup_query_command( + self.mcu, "query_mpu9250_status oid=%c", oid=self.oid, cq=cmdqueue) def read_reg(self, reg): params = self.i2c.i2c_read([reg], 1) return bytearray(params['response'])[0] @@ -142,11 +139,6 @@ def _extract_samples(self, raw_samples): self.clock_sync.set_last_chip_clock(seq * SAMPLES_PER_BLOCK + i) del samples[count:] return samples - - def _update_clock(self, minclock=0): - params = self.query_mpu9250_status_cmd.send([self.oid], - minclock=minclock) - self.clock_updater.update_clock(params) # Start, stop, and process message batches def _start_measurements(self): # In case of miswiring, testing MPU9250 device ID prevents treating @@ -184,8 +176,6 @@ def _start_measurements(self): logging.info("MPU9250 starting '%s' measurements", self.name) # Initialize clock tracking self.clock_updater.note_start(reqclock) - self._update_clock(minclock=reqclock) - self.clock_updater.clear_duration_filter() self.last_error_count = 0 def _finish_measurements(self): # Halt bulk reading @@ -195,7 +185,7 @@ def _finish_measurements(self): self.set_reg(REG_PWR_MGMT_1, SET_PWR_MGMT_1_SLEEP) self.set_reg(REG_PWR_MGMT_2, SET_PWR_MGMT_2_OFF) def _process_batch(self, eventtime): - self._update_clock() + self.clock_updater.update_clock() raw_samples = 
self.bulk_queue.pull_samples() if not raw_samples: return {} @@ -203,7 +193,7 @@ def _process_batch(self, eventtime): if not samples: return {} return {'data': samples, 'errors': self.last_error_count, - 'overflows': self.clock_updater.get_last_limit_count()} + 'overflows': self.clock_updater.get_last_overflows()} def load_config(config): return MPU9250(config) diff --git a/src/Kconfig b/src/Kconfig index aaf506539..91376910c 100644 --- a/src/Kconfig +++ b/src/Kconfig @@ -112,6 +112,10 @@ config WANT_SOFTWARE_SPI bool depends on HAVE_GPIO && HAVE_GPIO_SPI default y +config NEED_SENSOR_BULK + bool + depends on WANT_SENSORS || WANT_LIS2DW + default y menu "Optional features (to reduce code size)" depends on HAVE_LIMITED_CODE_SIZE config WANT_GPIO_BITBANGING diff --git a/src/Makefile b/src/Makefile index 8d771f9eb..eddad9783 100644 --- a/src/Makefile +++ b/src/Makefile @@ -16,6 +16,7 @@ src-$(CONFIG_WANT_SOFTWARE_SPI) += spi_software.c src-$(CONFIG_WANT_SOFTWARE_I2C) += i2c_software.c sensors-src-$(CONFIG_HAVE_GPIO_SPI) := thermocouple.c sensor_adxl345.c \ sensor_angle.c -src-$(CONFIG_WANT_LIS2DW) += sensor_lis2dw.c sensors-src-$(CONFIG_HAVE_GPIO_I2C) += sensor_mpu9250.c src-$(CONFIG_WANT_SENSORS) += $(sensors-src-y) +src-$(CONFIG_WANT_LIS2DW) += sensor_lis2dw.c +src-$(CONFIG_NEED_SENSOR_BULK) += sensor_bulk.c diff --git a/src/sensor_adxl345.c b/src/sensor_adxl345.c index 5ec3945e8..8cd471ef5 100644 --- a/src/sensor_adxl345.c +++ b/src/sensor_adxl345.c @@ -10,15 +10,15 @@ #include "basecmd.h" // oid_alloc #include "command.h" // DECL_COMMAND #include "sched.h" // DECL_TASK +#include "sensor_bulk.h" // sensor_bulk_report #include "spicmds.h" // spidev_transfer struct adxl345 { struct timer timer; uint32_t rest_ticks; struct spidev_s *spi; - uint16_t sequence, limit_count; - uint8_t flags, data_count; - uint8_t data[50]; + uint8_t flags; + struct sensor_bulk sb; }; enum { @@ -47,27 +47,6 @@ command_config_adxl345(uint32_t *args) } DECL_COMMAND(command_config_adxl345, "config_adxl345 oid=%c spi_oid=%c"); -// Report local measurement buffer -static void -adxl_report(struct adxl345 *ax, uint8_t oid) -{ - sendf("adxl345_data oid=%c sequence=%hu data=%*s" - , oid, ax->sequence, ax->data_count, ax->data); - ax->data_count = 0; - ax->sequence++; -} - -// Report buffer and fifo status -static void -adxl_status(struct adxl345 *ax, uint_fast8_t oid - , uint32_t time1, uint32_t time2, uint_fast8_t fifo) -{ - sendf("adxl345_status oid=%c clock=%u query_ticks=%u next_sequence=%hu" - " buffered=%c fifo=%c limit_count=%hu" - , oid, time1, time2-time1, ax->sequence - , ax->data_count, fifo, ax->limit_count); -} - // Helper code to reschedule the adxl345_event() timer static void adxl_reschedule_timer(struct adxl345 *ax) @@ -87,6 +66,8 @@ adxl_reschedule_timer(struct adxl345 *ax) #define SET_FIFO_CTL 0x90 +#define BYTES_PER_SAMPLE 5 + // Query accelerometer data static void adxl_query(struct adxl345 *ax, uint8_t oid) @@ -96,7 +77,7 @@ adxl_query(struct adxl345 *ax, uint8_t oid) spidev_transfer(ax->spi, 1, sizeof(msg), msg); // Extract x, y, z measurements uint_fast8_t fifo_status = msg[8] & ~0x80; // Ignore trigger bit - uint8_t *d = &ax->data[ax->data_count]; + uint8_t *d = &ax->sb.data[ax->sb.data_count]; if (((msg[2] & 0xf0) && (msg[2] & 0xf0) != 0xf0) || ((msg[4] & 0xf0) && (msg[4] & 0xf0) != 0xf0) || ((msg[6] & 0xf0) && (msg[6] & 0xf0) != 0xf0) @@ -112,12 +93,12 @@ adxl_query(struct adxl345 *ax, uint8_t oid) d[3] = (msg[2] & 0x1f) | (msg[6] << 5); // x high bits and z high bits d[4] = (msg[4] & 0x1f) | 
((msg[6] << 2) & 0x60); // y high and z high } - ax->data_count += 5; - if (ax->data_count + 5 > ARRAY_SIZE(ax->data)) - adxl_report(ax, oid); + ax->sb.data_count += BYTES_PER_SAMPLE; + if (ax->sb.data_count + BYTES_PER_SAMPLE > ARRAY_SIZE(ax->sb.data)) + sensor_bulk_report(&ax->sb, oid); // Check fifo status if (fifo_status >= 31) - ax->limit_count++; + ax->sb.possible_overflows++; if (fifo_status > 1 && fifo_status <= 32) { // More data in fifo - wake this task again sched_wake_task(&adxl345_wake); @@ -166,8 +147,7 @@ command_query_adxl345(uint32_t *args) ax->timer.waketime = args[1]; ax->rest_ticks = args[2]; ax->flags = AX_HAVE_START; - ax->sequence = ax->limit_count = 0; - ax->data_count = 0; + sensor_bulk_reset(&ax->sb); sched_add_timer(&ax->timer); } DECL_COMMAND(command_query_adxl345, @@ -178,10 +158,17 @@ command_query_adxl345_status(uint32_t *args) { struct adxl345 *ax = oid_lookup(args[0], command_config_adxl345); uint8_t msg[2] = { AR_FIFO_STATUS | AM_READ, 0x00 }; + uint32_t time1 = timer_read_time(); spidev_transfer(ax->spi, 1, sizeof(msg), msg); uint32_t time2 = timer_read_time(); - adxl_status(ax, args[0], time1, time2, msg[1]); + + uint_fast8_t fifo_status = msg[1] & ~0x80; // Ignore trigger bit + if (fifo_status > 32) + // Query error - don't send response - host will retry + return; + sensor_bulk_status(&ax->sb, args[0], time1, time2-time1 + , fifo_status * BYTES_PER_SAMPLE); } DECL_COMMAND(command_query_adxl345_status, "query_adxl345_status oid=%c"); diff --git a/src/sensor_angle.c b/src/sensor_angle.c index 865670b89..54caecc21 100644 --- a/src/sensor_angle.c +++ b/src/sensor_angle.c @@ -10,6 +10,7 @@ #include "board/irq.h" // irq_disable #include "command.h" // DECL_COMMAND #include "sched.h" // DECL_TASK +#include "sensor_bulk.h" // sensor_bulk_report #include "spicmds.h" // spidev_transfer enum { SA_CHIP_A1333, SA_CHIP_AS5047D, SA_CHIP_TLE5012B, SA_CHIP_MAX }; @@ -29,15 +30,16 @@ struct spi_angle { struct timer timer; uint32_t rest_ticks; struct spidev_s *spi; - uint16_t sequence; - uint8_t flags, chip_type, data_count, time_shift, overflow; - uint8_t data[48]; + uint8_t flags, chip_type, time_shift, overflow; + struct sensor_bulk sb; }; enum { SA_PENDING = 1<<2, }; +#define BYTES_PER_SAMPLE 3 + static struct task_wake angle_wake; // Event handler that wakes spi_angle_task() periodically @@ -72,32 +74,22 @@ command_config_spi_angle(uint32_t *args) DECL_COMMAND(command_config_spi_angle, "config_spi_angle oid=%c spi_oid=%c spi_angle_type=%c"); -// Report local measurement buffer -static void -angle_report(struct spi_angle *sa, uint8_t oid) -{ - sendf("spi_angle_data oid=%c sequence=%hu data=%*s" - , oid, sa->sequence, sa->data_count, sa->data); - sa->data_count = 0; - sa->sequence++; -} - // Send spi_angle_data message if buffer is full static void angle_check_report(struct spi_angle *sa, uint8_t oid) { - if (sa->data_count + 3 > ARRAY_SIZE(sa->data)) - angle_report(sa, oid); + if (sa->sb.data_count + BYTES_PER_SAMPLE > ARRAY_SIZE(sa->sb.data)) + sensor_bulk_report(&sa->sb, oid); } // Add an entry to the measurement buffer static void angle_add(struct spi_angle *sa, uint_fast8_t tcode, uint_fast16_t data) { - sa->data[sa->data_count] = tcode; - sa->data[sa->data_count + 1] = data; - sa->data[sa->data_count + 2] = data >> 8; - sa->data_count += 3; + sa->sb.data[sa->sb.data_count] = tcode; + sa->sb.data[sa->sb.data_count + 1] = data; + sa->sb.data[sa->sb.data_count + 2] = data >> 8; + sa->sb.data_count += BYTES_PER_SAMPLE; } // Add an error indicator to the measurement 
buffer @@ -237,8 +229,7 @@ command_query_spi_angle(uint32_t *args) // Start new measurements query sa->timer.waketime = args[1]; sa->rest_ticks = args[2]; - sa->sequence = 0; - sa->data_count = 0; + sensor_bulk_reset(&sa->sb); sa->time_shift = args[3]; sched_add_timer(&sa->timer); } diff --git a/src/sensor_bulk.c b/src/sensor_bulk.c new file mode 100644 index 000000000..9b5c782c5 --- /dev/null +++ b/src/sensor_bulk.c @@ -0,0 +1,38 @@ +// Helper code for collecting and sending bulk sensor measurements +// +// Copyright (C) 2020-2023 Kevin O'Connor +// +// This file may be distributed under the terms of the GNU GPLv3 license. + +#include "command.h" // sendf +#include "sensor_bulk.h" // sensor_bulk_report + +// Reset counters +void +sensor_bulk_reset(struct sensor_bulk *sb) +{ + sb->sequence = 0; + sb->possible_overflows = 0; + sb->data_count = 0; +} + +// Report local measurement buffer +void +sensor_bulk_report(struct sensor_bulk *sb, uint8_t oid) +{ + sendf("sensor_bulk_data oid=%c sequence=%hu data=%*s" + , oid, sb->sequence, sb->data_count, sb->data); + sb->data_count = 0; + sb->sequence++; +} + +// Report buffer and fifo status +void +sensor_bulk_status(struct sensor_bulk *sb, uint8_t oid + , uint32_t time1, uint32_t query_ticks, uint32_t fifo) +{ + sendf("sensor_bulk_status oid=%c clock=%u query_ticks=%u next_sequence=%hu" + " buffered=%u possible_overflows=%hu" + , oid, time1, query_ticks, sb->sequence + , sb->data_count + fifo, sb->possible_overflows); +} diff --git a/src/sensor_bulk.h b/src/sensor_bulk.h new file mode 100644 index 000000000..9c130bea3 --- /dev/null +++ b/src/sensor_bulk.h @@ -0,0 +1,15 @@ +#ifndef __SENSOR_BULK_H +#define __SENSOR_BULK_H + +struct sensor_bulk { + uint16_t sequence, possible_overflows; + uint8_t data_count; + uint8_t data[52]; +}; + +void sensor_bulk_reset(struct sensor_bulk *sb); +void sensor_bulk_report(struct sensor_bulk *sb, uint8_t oid); +void sensor_bulk_status(struct sensor_bulk *sb, uint8_t oid + , uint32_t time1, uint32_t query_ticks, uint32_t fifo); + +#endif // sensor_bulk.h diff --git a/src/sensor_lis2dw.c b/src/sensor_lis2dw.c index 06dd32068..579ee1f71 100644 --- a/src/sensor_lis2dw.c +++ b/src/sensor_lis2dw.c @@ -11,6 +11,7 @@ #include "basecmd.h" // oid_alloc #include "command.h" // DECL_COMMAND #include "sched.h" // DECL_TASK +#include "sensor_bulk.h" // sensor_bulk_report #include "spicmds.h" // spidev_transfer #define LIS_AR_DATAX0 0x28 @@ -18,13 +19,14 @@ #define LIS_FIFO_CTRL 0x2E #define LIS_FIFO_SAMPLES 0x2F +#define BYTES_PER_SAMPLE 6 + struct lis2dw { struct timer timer; uint32_t rest_ticks; struct spidev_s *spi; - uint16_t sequence, limit_count; - uint8_t flags, data_count; - uint8_t data[48]; + uint8_t flags; + struct sensor_bulk sb; }; enum { @@ -53,27 +55,6 @@ command_config_lis2dw(uint32_t *args) } DECL_COMMAND(command_config_lis2dw, "config_lis2dw oid=%c spi_oid=%c"); -// Report local measurement buffer -static void -lis2dw_report(struct lis2dw *ax, uint8_t oid) -{ - sendf("lis2dw_data oid=%c sequence=%hu data=%*s" - , oid, ax->sequence, ax->data_count, ax->data); - ax->data_count = 0; - ax->sequence++; -} - -// Report buffer and fifo status -static void -lis2dw_status(struct lis2dw *ax, uint_fast8_t oid - , uint32_t time1, uint32_t time2, uint_fast8_t fifo) -{ - sendf("lis2dw_status oid=%c clock=%u query_ticks=%u next_sequence=%hu" - " buffered=%c fifo=%c limit_count=%hu" - , oid, time1, time2-time1, ax->sequence - , ax->data_count, fifo, ax->limit_count); -} - // Helper code to reschedule the lis2dw_event() timer 
static void lis2dw_reschedule_timer(struct lis2dw *ax) @@ -93,7 +74,7 @@ lis2dw_query(struct lis2dw *ax, uint8_t oid) uint8_t fifo_empty,fifo_ovrn = 0; msg[0] = LIS_AR_DATAX0 | LIS_AM_READ ; - uint8_t *d = &ax->data[ax->data_count]; + uint8_t *d = &ax->sb.data[ax->sb.data_count]; spidev_transfer(ax->spi, 1, sizeof(msg), msg); @@ -108,13 +89,13 @@ lis2dw_query(struct lis2dw *ax, uint8_t oid) d[4] = msg[5]; // z low bits d[5] = msg[6]; // z high bits - ax->data_count += 6; - if (ax->data_count + 6 > ARRAY_SIZE(ax->data)) - lis2dw_report(ax, oid); + ax->sb.data_count += BYTES_PER_SAMPLE; + if (ax->sb.data_count + BYTES_PER_SAMPLE > ARRAY_SIZE(ax->sb.data)) + sensor_bulk_report(&ax->sb, oid); // Check fifo status if (fifo_ovrn) - ax->limit_count++; + ax->sb.possible_overflows++; // check if we need to run the task again (more packets in fifo?) if (!fifo_empty) { @@ -165,8 +146,7 @@ command_query_lis2dw(uint32_t *args) ax->timer.waketime = args[1]; ax->rest_ticks = args[2]; ax->flags = LIS_HAVE_START; - ax->sequence = ax->limit_count = 0; - ax->data_count = 0; + sensor_bulk_reset(&ax->sb); sched_add_timer(&ax->timer); } DECL_COMMAND(command_query_lis2dw, @@ -180,7 +160,8 @@ command_query_lis2dw_status(uint32_t *args) uint32_t time1 = timer_read_time(); spidev_transfer(ax->spi, 1, sizeof(msg), msg); uint32_t time2 = timer_read_time(); - lis2dw_status(ax, args[0], time1, time2, msg[1]&0x1f); + sensor_bulk_status(&ax->sb, args[0], time1, time2-time1 + , (msg[1] & 0x1f) * BYTES_PER_SAMPLE); } DECL_COMMAND(command_query_lis2dw_status, "query_lis2dw_status oid=%c"); diff --git a/src/sensor_mpu9250.c b/src/sensor_mpu9250.c index 7792b4d80..d52de811d 100644 --- a/src/sensor_mpu9250.c +++ b/src/sensor_mpu9250.c @@ -12,6 +12,7 @@ #include "basecmd.h" // oid_alloc #include "command.h" // DECL_COMMAND #include "sched.h" // DECL_TASK +#include "sensor_bulk.h" // sensor_bulk_report #include "board/gpio.h" // i2c_read #include "i2ccmds.h" // i2cdev_oid_lookup @@ -46,11 +47,9 @@ struct mpu9250 { struct timer timer; uint32_t rest_ticks; struct i2cdev_s *i2c; - uint16_t sequence, limit_count, fifo_max, fifo_pkts_bytes; - uint8_t flags, data_count; - // msg size must be <= 255 due to Klipper api - // = SAMPLES_PER_BLOCK (from mpu9250.py) * BYTES_PER_FIFO_ENTRY + 1 - uint8_t data[BYTES_PER_BLOCK]; + uint16_t fifo_max, fifo_pkts_bytes; + uint8_t flags; + struct sensor_bulk sb; }; enum { @@ -92,27 +91,6 @@ command_config_mpu9250(uint32_t *args) } DECL_COMMAND(command_config_mpu9250, "config_mpu9250 oid=%c i2c_oid=%c"); -// Report local measurement buffer -static void -mp9250_report(struct mpu9250 *mp, uint8_t oid) -{ - sendf("mpu9250_data oid=%c sequence=%hu data=%*s" - , oid, mp->sequence, mp->data_count, mp->data); - mp->data_count = 0; - mp->sequence++; -} - -// Report buffer and fifo status -static void -mp9250_status(struct mpu9250 *mp, uint_fast8_t oid - , uint32_t time1, uint32_t time2, uint16_t fifo) -{ - sendf("mpu9250_status oid=%c clock=%u query_ticks=%u next_sequence=%hu" - " buffered=%c fifo=%u limit_count=%hu" - , oid, time1, time2-time1, mp->sequence - , mp->data_count, fifo, mp->limit_count); -} - // Helper code to reschedule the mpu9250_event() timer static void mp9250_reschedule_timer(struct mpu9250 *mp) @@ -135,10 +113,10 @@ mp9250_query(struct mpu9250 *mp, uint8_t oid) if (mp->fifo_pkts_bytes >= BYTES_PER_BLOCK) { uint8_t reg = AR_FIFO; i2c_read(mp->i2c->i2c_config, sizeof(reg), ® - , BYTES_PER_BLOCK, &mp->data[0]); - mp->data_count = BYTES_PER_BLOCK; + , BYTES_PER_BLOCK, &mp->sb.data[0]); + 
mp->sb.data_count = BYTES_PER_BLOCK; mp->fifo_pkts_bytes -= BYTES_PER_BLOCK; - mp9250_report(mp, oid); + sensor_bulk_report(&mp->sb, oid); } // If we have enough bytes remaining to fill another report wake again @@ -214,9 +192,7 @@ command_query_mpu9250(uint32_t *args) mp->timer.waketime = args[1]; mp->rest_ticks = args[2]; mp->flags = AX_HAVE_START; - mp->sequence = 0; - mp->limit_count = 0; - mp->data_count = 0; + sensor_bulk_reset(&mp->sb); mp->fifo_max = 0; mp->fifo_pkts_bytes = 0; sched_add_timer(&mp->timer); @@ -235,7 +211,7 @@ command_query_mpu9250_status(uint32_t *args) i2c_read(mp->i2c->i2c_config, sizeof(int_reg), int_reg, sizeof(int_msg), &int_msg); if (int_msg & FIFO_OVERFLOW_INT) - mp->limit_count++; + mp->sb.possible_overflows++; // Read latest FIFO count (with precise timing) uint8_t reg[] = {AR_FIFO_COUNT_H}; @@ -246,7 +222,7 @@ command_query_mpu9250_status(uint32_t *args) uint16_t fifo_bytes = ((msg[0] & 0x1f) << 8) | msg[1]; // Report status - mp9250_status(mp, args[0], time1, time2, fifo_bytes / BYTES_PER_FIFO_ENTRY); + sensor_bulk_status(&mp->sb, args[0], time1, time2-time1, fifo_bytes); } DECL_COMMAND(command_query_mpu9250_status, "query_mpu9250_status oid=%c"); From 2dc4cfc5df3bd2b3e040a73b86d59c7a3883fc7f Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Wed, 27 Dec 2023 12:58:53 -0500 Subject: [PATCH 45/63] bulk_sensor: Don't assume chip_clock is zero on start of queries Send an explicit clock query in ChipClockUpdater to seed the initial clock. Signed-off-by: Kevin O'Connor --- klippy/extras/adxl345.py | 2 +- klippy/extras/bulk_sensor.py | 16 ++++++++++------ klippy/extras/lis2dw.py | 2 +- klippy/extras/mpu9250.py | 2 +- 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/klippy/extras/adxl345.py b/klippy/extras/adxl345.py index b91224d50..45d8369f0 100644 --- a/klippy/extras/adxl345.py +++ b/klippy/extras/adxl345.py @@ -310,7 +310,7 @@ def _start_measurements(self): reqclock=reqclock) logging.info("ADXL345 starting '%s' measurements", self.name) # Initialize clock tracking - self.clock_updater.note_start(reqclock) + self.clock_updater.note_start() self.last_error_count = 0 def _finish_measurements(self): # Halt bulk reading diff --git a/klippy/extras/bulk_sensor.py b/klippy/extras/bulk_sensor.py index df5a5da25..ad486bc4b 100644 --- a/klippy/extras/bulk_sensor.py +++ b/klippy/extras/bulk_sensor.py @@ -222,15 +222,15 @@ def get_last_overflows(self): return self.last_overflows def clear_duration_filter(self): self.max_query_duration = 1 << 31 - def note_start(self, reqclock): + def note_start(self): self.last_sequence = 0 self.last_overflows = 0 - self.clock_sync.reset(reqclock, 0) + # Set initial clock self.clear_duration_filter() - self.update_clock(minclock=reqclock) + self.update_clock(is_reset=True) self.clear_duration_filter() - def update_clock(self, minclock=0): - params = self.query_status_cmd.send([self.oid], minclock=minclock) + def update_clock(self, is_reset=False): + params = self.query_status_cmd.send([self.oid]) mcu_clock = self.mcu.clock32_to_clock64(params['clock']) seq_diff = (params['next_sequence'] - self.last_sequence) & 0xffff self.last_sequence += seq_diff @@ -250,4 +250,8 @@ def update_clock(self, minclock=0): # inaccuracy of query responses and plus .5 for assumed offset # of hardware processing time. 
chip_clock = msg_count + 1 - self.clock_sync.update(mcu_clock + duration // 2, chip_clock) + avg_mcu_clock = mcu_clock + duration // 2 + if is_reset: + self.clock_sync.reset(avg_mcu_clock, chip_clock) + else: + self.clock_sync.update(avg_mcu_clock, chip_clock) diff --git a/klippy/extras/lis2dw.py b/klippy/extras/lis2dw.py index 74911e6fb..469a4f0d6 100644 --- a/klippy/extras/lis2dw.py +++ b/klippy/extras/lis2dw.py @@ -162,7 +162,7 @@ def _start_measurements(self): reqclock=reqclock) logging.info("LIS2DW starting '%s' measurements", self.name) # Initialize clock tracking - self.clock_updater.note_start(reqclock) + self.clock_updater.note_start() self.last_error_count = 0 def _finish_measurements(self): # Halt bulk reading diff --git a/klippy/extras/mpu9250.py b/klippy/extras/mpu9250.py index 4626e1c0c..421274bad 100644 --- a/klippy/extras/mpu9250.py +++ b/klippy/extras/mpu9250.py @@ -175,7 +175,7 @@ def _start_measurements(self): reqclock=reqclock) logging.info("MPU9250 starting '%s' measurements", self.name) # Initialize clock tracking - self.clock_updater.note_start(reqclock) + self.clock_updater.note_start() self.last_error_count = 0 def _finish_measurements(self): # Halt bulk reading From 6f0e91f69fa813fc19833ebeed9b1ccde0ba97ee Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Wed, 27 Dec 2023 13:14:53 -0500 Subject: [PATCH 46/63] sensor_adxl345: No need to schedule start of bulk reading It's simpler and faster to enable the adxl345 in the python code. Signed-off-by: Kevin O'Connor --- klippy/extras/adxl345.py | 16 +++++------- src/sensor_adxl345.c | 55 +++++++++------------------------------- 2 files changed, 18 insertions(+), 53 deletions(-) diff --git a/klippy/extras/adxl345.py b/klippy/extras/adxl345.py index 45d8369f0..738903f38 100644 --- a/klippy/extras/adxl345.py +++ b/klippy/extras/adxl345.py @@ -184,8 +184,6 @@ def read_axes_map(config): raise config.error("Invalid axes_map parameter") return [am[a.strip()] for a in axes_map] -MIN_MSG_TIME = 0.100 - BYTES_PER_SAMPLE = 5 SAMPLES_PER_BLOCK = bulk_sensor.MAX_BULK_MSG_SIZE // BYTES_PER_SAMPLE @@ -207,7 +205,7 @@ def __init__(self, config): self.query_adxl345_cmd = None mcu.add_config_cmd("config_adxl345 oid=%d spi_oid=%d" % (oid, self.spi.get_oid())) - mcu.add_config_cmd("query_adxl345 oid=%d clock=0 rest_ticks=0" + mcu.add_config_cmd("query_adxl345 oid=%d rest_ticks=0" % (oid,), on_restart=True) mcu.register_config_callback(self._build_config) self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, oid=oid) @@ -228,7 +226,7 @@ def __init__(self, config): def _build_config(self): cmdqueue = self.spi.get_command_queue() self.query_adxl345_cmd = self.mcu.lookup_command( - "query_adxl345 oid=%c clock=%u rest_ticks=%u", cq=cmdqueue) + "query_adxl345 oid=%c rest_ticks=%u", cq=cmdqueue) self.clock_updater.setup_query_command( self.mcu, "query_adxl345_status oid=%c", oid=self.oid, cq=cmdqueue) def read_reg(self, reg): @@ -302,19 +300,17 @@ def _start_measurements(self): self.set_reg(REG_FIFO_CTL, SET_FIFO_CTL) # Start bulk reading self.bulk_queue.clear_samples() - systime = self.printer.get_reactor().monotonic() - print_time = self.mcu.estimated_print_time(systime) + MIN_MSG_TIME - reqclock = self.mcu.print_time_to_clock(print_time) rest_ticks = self.mcu.seconds_to_clock(4. 
/ self.data_rate) - self.query_adxl345_cmd.send([self.oid, reqclock, rest_ticks], - reqclock=reqclock) + self.query_adxl345_cmd.send([self.oid, rest_ticks]) + self.set_reg(REG_POWER_CTL, 0x08) logging.info("ADXL345 starting '%s' measurements", self.name) # Initialize clock tracking self.clock_updater.note_start() self.last_error_count = 0 def _finish_measurements(self): # Halt bulk reading - self.query_adxl345_cmd.send_wait_ack([self.oid, 0, 0]) + self.set_reg(REG_POWER_CTL, 0x00) + self.query_adxl345_cmd.send_wait_ack([self.oid, 0]) self.bulk_queue.clear_samples() logging.info("ADXL345 finished '%s' measurements", self.name) def _process_batch(self, eventtime): diff --git a/src/sensor_adxl345.c b/src/sensor_adxl345.c index 8cd471ef5..32ce4c653 100644 --- a/src/sensor_adxl345.c +++ b/src/sensor_adxl345.c @@ -1,6 +1,6 @@ // Support for gathering acceleration data from ADXL345 chip // -// Copyright (C) 2020 Kevin O'Connor +// Copyright (C) 2020-2023 Kevin O'Connor // // This file may be distributed under the terms of the GNU GPLv3 license. @@ -22,7 +22,7 @@ struct adxl345 { }; enum { - AX_HAVE_START = 1<<0, AX_RUNNING = 1<<1, AX_PENDING = 1<<2, + AX_PENDING = 1<<0, }; static struct task_wake adxl345_wake; @@ -58,7 +58,6 @@ adxl_reschedule_timer(struct adxl345 *ax) } // Chip registers -#define AR_POWER_CTL 0x2D #define AR_DATAX0 0x32 #define AR_FIFO_STATUS 0x39 #define AM_READ 0x80 @@ -99,59 +98,33 @@ adxl_query(struct adxl345 *ax, uint8_t oid) // Check fifo status if (fifo_status >= 31) ax->sb.possible_overflows++; - if (fifo_status > 1 && fifo_status <= 32) { + if (fifo_status > 1) { // More data in fifo - wake this task again sched_wake_task(&adxl345_wake); - } else if (ax->flags & AX_RUNNING) { + } else { // Sleep until next check time - sched_del_timer(&ax->timer); ax->flags &= ~AX_PENDING; adxl_reschedule_timer(ax); } } -// Startup measurements -static void -adxl_start(struct adxl345 *ax, uint8_t oid) -{ - sched_del_timer(&ax->timer); - ax->flags = AX_RUNNING; - uint8_t msg[2] = { AR_POWER_CTL, 0x08 }; - spidev_transfer(ax->spi, 0, sizeof(msg), msg); - adxl_reschedule_timer(ax); -} - -// End measurements -static void -adxl_stop(struct adxl345 *ax, uint8_t oid) -{ - // Disable measurements - sched_del_timer(&ax->timer); - ax->flags = 0; - uint8_t msg[2] = { AR_POWER_CTL, 0x00 }; - spidev_transfer(ax->spi, 0, sizeof(msg), msg); -} - void command_query_adxl345(uint32_t *args) { struct adxl345 *ax = oid_lookup(args[0], command_config_adxl345); - if (!args[2]) { + sched_del_timer(&ax->timer); + ax->flags = 0; + if (!args[1]) // End measurements - adxl_stop(ax, args[0]); return; - } + // Start new measurements query - sched_del_timer(&ax->timer); - ax->timer.waketime = args[1]; - ax->rest_ticks = args[2]; - ax->flags = AX_HAVE_START; + ax->rest_ticks = args[1]; sensor_bulk_reset(&ax->sb); - sched_add_timer(&ax->timer); + adxl_reschedule_timer(ax); } -DECL_COMMAND(command_query_adxl345, - "query_adxl345 oid=%c clock=%u rest_ticks=%u"); +DECL_COMMAND(command_query_adxl345, "query_adxl345 oid=%c rest_ticks=%u"); void command_query_adxl345_status(uint32_t *args) @@ -181,11 +154,7 @@ adxl345_task(void) struct adxl345 *ax; foreach_oid(oid, ax, command_config_adxl345) { uint_fast8_t flags = ax->flags; - if (!(flags & AX_PENDING)) - continue; - if (flags & AX_HAVE_START) - adxl_start(ax, oid); - else + if (flags & AX_PENDING) adxl_query(ax, oid); } } From d853c1981107e6602c65285f91c805e8f33c3846 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Wed, 27 Dec 2023 12:02:10 -0500 Subject: [PATCH 47/63] 
sensor_mpu9250: No need to schedule start of bulk reading It's simpler and faster to enable the mpu9250 in the python code. Signed-off-by: Kevin O'Connor --- klippy/extras/mpu9250.py | 26 ++++---- src/sensor_mpu9250.c | 125 +++++++++------------------------------ 2 files changed, 43 insertions(+), 108 deletions(-) diff --git a/klippy/extras/mpu9250.py b/klippy/extras/mpu9250.py index 421274bad..b4b8c4391 100644 --- a/klippy/extras/mpu9250.py +++ b/klippy/extras/mpu9250.py @@ -30,6 +30,7 @@ REG_USER_CTRL = 0x6A REG_PWR_MGMT_1 = 0x6B REG_PWR_MGMT_2 = 0x6C +REG_INT_STATUS = 0x3A SAMPLE_RATE_DIVS = { 4000:0x00 } @@ -40,6 +41,10 @@ SET_PWR_MGMT_1_SLEEP= 0x40 SET_PWR_MGMT_2_ACCEL_ON = 0x07 SET_PWR_MGMT_2_OFF = 0x3F +SET_USER_FIFO_RESET = 0x04 +SET_USER_FIFO_EN = 0x40 +SET_ENABLE_FIFO = 0x08 +SET_DISABLE_FIFO = 0x00 FREEFALL_ACCEL = 9.80665 * 1000. # SCALE = 1/4096 g/LSB @8g scale * Earth gravity in mm/s**2 @@ -47,8 +52,6 @@ FIFO_SIZE = 512 -MIN_MSG_TIME = 0.100 - BYTES_PER_SAMPLE = 6 SAMPLES_PER_BLOCK = bulk_sensor.MAX_BULK_MSG_SIZE // BYTES_PER_SAMPLE @@ -90,10 +93,10 @@ def _build_config(self): cmdqueue = self.i2c.get_command_queue() self.mcu.add_config_cmd("config_mpu9250 oid=%d i2c_oid=%d" % (self.oid, self.i2c.get_oid())) - self.mcu.add_config_cmd("query_mpu9250 oid=%d clock=0 rest_ticks=0" + self.mcu.add_config_cmd("query_mpu9250 oid=%d rest_ticks=0" % (self.oid,), on_restart=True) self.query_mpu9250_cmd = self.mcu.lookup_command( - "query_mpu9250 oid=%c clock=%u rest_ticks=%u", cq=cmdqueue) + "query_mpu9250 oid=%c rest_ticks=%u", cq=cmdqueue) self.clock_updater.setup_query_command( self.mcu, "query_mpu9250_status oid=%c", oid=self.oid, cq=cmdqueue) def read_reg(self, reg): @@ -164,22 +167,25 @@ def _start_measurements(self): self.set_reg(REG_CONFIG, SET_CONFIG) self.set_reg(REG_ACCEL_CONFIG, SET_ACCEL_CONFIG) self.set_reg(REG_ACCEL_CONFIG2, SET_ACCEL_CONFIG2) + # Reset fifo + self.set_reg(REG_FIFO_EN, SET_DISABLE_FIFO) + self.set_reg(REG_USER_CTRL, SET_USER_FIFO_RESET) + self.set_reg(REG_USER_CTRL, SET_USER_FIFO_EN) + self.read_reg(REG_INT_STATUS) # clear FIFO overflow flag # Start bulk reading self.bulk_queue.clear_samples() - systime = self.printer.get_reactor().monotonic() - print_time = self.mcu.estimated_print_time(systime) + MIN_MSG_TIME - reqclock = self.mcu.print_time_to_clock(print_time) rest_ticks = self.mcu.seconds_to_clock(4. / self.data_rate) - self.query_mpu9250_cmd.send([self.oid, reqclock, rest_ticks], - reqclock=reqclock) + self.query_mpu9250_cmd.send([self.oid, rest_ticks]) + self.set_reg(REG_FIFO_EN, SET_ENABLE_FIFO) logging.info("MPU9250 starting '%s' measurements", self.name) # Initialize clock tracking self.clock_updater.note_start() self.last_error_count = 0 def _finish_measurements(self): # Halt bulk reading - self.query_mpu9250_cmd.send_wait_ack([self.oid, 0, 0]) + self.set_reg(REG_FIFO_EN, SET_DISABLE_FIFO) + self.query_mpu9250_cmd.send_wait_ack([self.oid, 0]) self.bulk_queue.clear_samples() logging.info("MPU9250 finished '%s' measurements", self.name) self.set_reg(REG_PWR_MGMT_1, SET_PWR_MGMT_1_SLEEP) diff --git a/src/sensor_mpu9250.c b/src/sensor_mpu9250.c index d52de811d..23c029211 100644 --- a/src/sensor_mpu9250.c +++ b/src/sensor_mpu9250.c @@ -2,7 +2,7 @@ // // Copyright (C) 2023 Matthew Swabey // Copyright (C) 2022 Harry Beyel -// Copyright (C) 2020-2021 Kevin O'Connor +// Copyright (C) 2020-2023 Kevin O'Connor // // This file may be distributed under the terms of the GNU GPLv3 license. 
@@ -17,27 +17,10 @@ #include "i2ccmds.h" // i2cdev_oid_lookup // Chip registers -#define AR_FIFO_SIZE 512 - -#define AR_PWR_MGMT_1 0x6B -#define AR_PWR_MGMT_2 0x6C -#define AR_FIFO_EN 0x23 -#define AR_ACCEL_OUT_XH 0x3B -#define AR_USER_CTRL 0x6A #define AR_FIFO_COUNT_H 0x72 #define AR_FIFO 0x74 #define AR_INT_STATUS 0x3A -#define SET_ENABLE_FIFO 0x08 -#define SET_DISABLE_FIFO 0x00 -#define SET_USER_FIFO_RESET 0x04 -#define SET_USER_FIFO_EN 0x40 - -#define SET_PWR_SLEEP 0x40 -#define SET_PWR_WAKE 0x00 -#define SET_PWR_2_ACCEL 0x07 // only enable accelerometers -#define SET_PWR_2_NONE 0x3F // disable all sensors - #define FIFO_OVERFLOW_INT 0x10 #define BYTES_PER_FIFO_ENTRY 6 @@ -53,24 +36,11 @@ struct mpu9250 { }; enum { - AX_HAVE_START = 1<<0, AX_RUNNING = 1<<1, AX_PENDING = 1<<2, + AX_PENDING = 1<<0, }; static struct task_wake mpu9250_wake; -// Reads the fifo byte count from the device. -static uint16_t -get_fifo_status (struct mpu9250 *mp) -{ - uint8_t reg[] = {AR_FIFO_COUNT_H}; - uint8_t msg[2]; - i2c_read(mp->i2c->i2c_config, sizeof(reg), reg, sizeof(msg), msg); - uint16_t fifo_bytes = ((msg[0] & 0x1f) << 8) | msg[1]; - if (fifo_bytes > mp->fifo_max) - mp->fifo_max = fifo_bytes; - return fifo_bytes; -} - // Event handler that wakes mpu9250_task() periodically static uint_fast8_t mpu9250_event(struct timer *timer) @@ -101,6 +71,19 @@ mp9250_reschedule_timer(struct mpu9250 *mp) irq_enable(); } +// Reads the fifo byte count from the device. +static uint16_t +get_fifo_status(struct mpu9250 *mp) +{ + uint8_t reg[] = {AR_FIFO_COUNT_H}; + uint8_t msg[2]; + i2c_read(mp->i2c->i2c_config, sizeof(reg), reg, sizeof(msg), msg); + uint16_t fifo_bytes = ((msg[0] & 0x1f) << 8) | msg[1]; + if (fifo_bytes > mp->fifo_max) + mp->fifo_max = fifo_bytes; + return fifo_bytes; +} + // Query accelerometer data static void mp9250_query(struct mpu9250 *mp, uint8_t oid) @@ -123,82 +106,35 @@ mp9250_query(struct mpu9250 *mp, uint8_t oid) // otherwise schedule timed wakeup if (mp->fifo_pkts_bytes >= BYTES_PER_BLOCK) { sched_wake_task(&mpu9250_wake); - } else if (mp->flags & AX_RUNNING) { - sched_del_timer(&mp->timer); + } else { mp->flags &= ~AX_PENDING; mp9250_reschedule_timer(mp); } } -// Startup measurements -static void -mp9250_start(struct mpu9250 *mp, uint8_t oid) -{ - sched_del_timer(&mp->timer); - mp->flags = AX_RUNNING; - uint8_t msg[2]; - - msg[0] = AR_FIFO_EN; - msg[1] = SET_DISABLE_FIFO; // disable FIFO - i2c_write(mp->i2c->i2c_config, sizeof(msg), msg); - - msg[0] = AR_USER_CTRL; - msg[1] = SET_USER_FIFO_RESET; // reset FIFO buffer - i2c_write(mp->i2c->i2c_config, sizeof(msg), msg); - - msg[0] = AR_USER_CTRL; - msg[1] = SET_USER_FIFO_EN; // enable FIFO buffer access - i2c_write(mp->i2c->i2c_config, sizeof(msg), msg); - - uint8_t int_reg[] = {AR_INT_STATUS}; // clear FIFO overflow flag - i2c_read(mp->i2c->i2c_config, sizeof(int_reg), int_reg, 1, msg); - - msg[0] = AR_FIFO_EN; - msg[1] = SET_ENABLE_FIFO; // enable accel output to FIFO - i2c_write(mp->i2c->i2c_config, sizeof(msg), msg); - - mp9250_reschedule_timer(mp); -} - -// End measurements -static void -mp9250_stop(struct mpu9250 *mp, uint8_t oid) -{ - // Disable measurements - sched_del_timer(&mp->timer); - mp->flags = 0; - - // disable accel FIFO - uint8_t msg[2] = { AR_FIFO_EN, SET_DISABLE_FIFO }; - i2c_write(mp->i2c->i2c_config, sizeof(msg), msg); - - // Uncomment and rebuilt to check for FIFO overruns when tuning - //output("mpu9240 limit_count=%u fifo_max=%u", - // mp->limit_count, mp->fifo_max); -} - void command_query_mpu9250(uint32_t *args) { 
struct mpu9250 *mp = oid_lookup(args[0], command_config_mpu9250); - if (!args[2]) { + sched_del_timer(&mp->timer); + mp->flags = 0; + if (!args[1]) { // End measurements - mp9250_stop(mp, args[0]); + + // Uncomment and rebuilt to check for FIFO overruns when tuning + //output("mpu9240 fifo_max=%u", mp->fifo_max); return; } + // Start new measurements query - sched_del_timer(&mp->timer); - mp->timer.waketime = args[1]; - mp->rest_ticks = args[2]; - mp->flags = AX_HAVE_START; + mp->rest_ticks = args[1]; sensor_bulk_reset(&mp->sb); mp->fifo_max = 0; mp->fifo_pkts_bytes = 0; - sched_add_timer(&mp->timer); + mp9250_reschedule_timer(mp); } -DECL_COMMAND(command_query_mpu9250, - "query_mpu9250 oid=%c clock=%u rest_ticks=%u"); +DECL_COMMAND(command_query_mpu9250, "query_mpu9250 oid=%c rest_ticks=%u"); void command_query_mpu9250_status(uint32_t *args) @@ -235,15 +171,8 @@ mpu9250_task(void) struct mpu9250 *mp; foreach_oid(oid, mp, command_config_mpu9250) { uint_fast8_t flags = mp->flags; - if (!(flags & AX_PENDING)) { - continue; - } - if (flags & AX_HAVE_START) { - mp9250_start(mp, oid); - } - else { + if (flags & AX_PENDING) mp9250_query(mp, oid); - } } } DECL_TASK(mpu9250_task); From d785b396a72b57c073ce5bdf7a9d5e9fe39fc914 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Wed, 27 Dec 2023 13:36:21 -0500 Subject: [PATCH 48/63] sensor_lis2dw: No need to schedule start of bulk reading It's simpler and faster to enable the lis2dw in the python code. Signed-off-by: Kevin O'Connor --- klippy/extras/lis2dw.py | 18 ++++++-------- src/sensor_lis2dw.c | 53 +++++++++-------------------------------- 2 files changed, 18 insertions(+), 53 deletions(-) diff --git a/klippy/extras/lis2dw.py b/klippy/extras/lis2dw.py index 469a4f0d6..e96313027 100644 --- a/klippy/extras/lis2dw.py +++ b/klippy/extras/lis2dw.py @@ -1,7 +1,7 @@ # Support for reading acceleration data from an LIS2DW chip # # Copyright (C) 2023 Zhou.XianMing -# Copyright (C) 2020-2021 Kevin O'Connor +# Copyright (C) 2020-2023 Kevin O'Connor # # This file may be distributed under the terms of the GNU GPLv3 license. import logging @@ -30,8 +30,6 @@ FREEFALL_ACCEL = 9.80665 SCALE = FREEFALL_ACCEL * 1.952 / 4 -MIN_MSG_TIME = 0.100 - BYTES_PER_SAMPLE = 6 SAMPLES_PER_BLOCK = bulk_sensor.MAX_BULK_MSG_SIZE // BYTES_PER_SAMPLE @@ -51,7 +49,7 @@ def __init__(self, config): self.query_lis2dw_cmd = None mcu.add_config_cmd("config_lis2dw oid=%d spi_oid=%d" % (oid, self.spi.get_oid())) - mcu.add_config_cmd("query_lis2dw oid=%d clock=0 rest_ticks=0" + mcu.add_config_cmd("query_lis2dw oid=%d rest_ticks=0" % (oid,), on_restart=True) mcu.register_config_callback(self._build_config) self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, oid=oid) @@ -73,7 +71,7 @@ def __init__(self, config): def _build_config(self): cmdqueue = self.spi.get_command_queue() self.query_lis2dw_cmd = self.mcu.lookup_command( - "query_lis2dw oid=%c clock=%u rest_ticks=%u", cq=cmdqueue) + "query_lis2dw oid=%c rest_ticks=%u", cq=cmdqueue) self.clock_updater.setup_query_command( self.mcu, "query_lis2dw_status oid=%c", oid=self.oid, cq=cmdqueue) def read_reg(self, reg): @@ -154,19 +152,17 @@ def _start_measurements(self): # Start bulk reading self.bulk_queue.clear_samples() - systime = self.printer.get_reactor().monotonic() - print_time = self.mcu.estimated_print_time(systime) + MIN_MSG_TIME - reqclock = self.mcu.print_time_to_clock(print_time) rest_ticks = self.mcu.seconds_to_clock(4. 
/ self.data_rate) - self.query_lis2dw_cmd.send([self.oid, reqclock, rest_ticks], - reqclock=reqclock) + self.query_lis2dw_cmd.send([self.oid, rest_ticks]) + self.set_reg(REG_LIS2DW_FIFO_CTRL, 0xC0) logging.info("LIS2DW starting '%s' measurements", self.name) # Initialize clock tracking self.clock_updater.note_start() self.last_error_count = 0 def _finish_measurements(self): # Halt bulk reading - self.query_lis2dw_cmd.send_wait_ack([self.oid, 0, 0]) + self.set_reg(REG_LIS2DW_FIFO_CTRL, 0x00) + self.query_lis2dw_cmd.send_wait_ack([self.oid, 0]) self.bulk_queue.clear_samples() logging.info("LIS2DW finished '%s' measurements", self.name) self.set_reg(REG_LIS2DW_FIFO_CTRL, 0x00) diff --git a/src/sensor_lis2dw.c b/src/sensor_lis2dw.c index 579ee1f71..83922003c 100644 --- a/src/sensor_lis2dw.c +++ b/src/sensor_lis2dw.c @@ -1,7 +1,7 @@ // Support for gathering acceleration data from LIS2DW chip // // Copyright (C) 2023 Zhou.XianMing -// Copyright (C) 2020 Kevin O'Connor +// Copyright (C) 2020-2023 Kevin O'Connor // // This file may be distributed under the terms of the GNU GPLv3 license. @@ -16,7 +16,6 @@ #define LIS_AR_DATAX0 0x28 #define LIS_AM_READ 0x80 -#define LIS_FIFO_CTRL 0x2E #define LIS_FIFO_SAMPLES 0x2F #define BYTES_PER_SAMPLE 6 @@ -30,7 +29,7 @@ struct lis2dw { }; enum { - LIS_HAVE_START = 1<<0, LIS_RUNNING = 1<<1, LIS_PENDING = 1<<2, + LIS_PENDING = 1<<0, }; static struct task_wake lis2dw_wake; @@ -101,56 +100,30 @@ lis2dw_query(struct lis2dw *ax, uint8_t oid) if (!fifo_empty) { // More data in fifo - wake this task again sched_wake_task(&lis2dw_wake); - } else if (ax->flags & LIS_RUNNING) { + } else { // Sleep until next check time - sched_del_timer(&ax->timer); ax->flags &= ~LIS_PENDING; lis2dw_reschedule_timer(ax); } } -// Startup measurements -static void -lis2dw_start(struct lis2dw *ax, uint8_t oid) -{ - sched_del_timer(&ax->timer); - ax->flags = LIS_RUNNING; - uint8_t ctrl[2] = {LIS_FIFO_CTRL , 0xC0}; - spidev_transfer(ax->spi, 0, sizeof(ctrl), ctrl); - lis2dw_reschedule_timer(ax); -} - -// End measurements -static void -lis2dw_stop(struct lis2dw *ax, uint8_t oid) -{ - // Disable measurements - sched_del_timer(&ax->timer); - ax->flags = 0; - uint8_t ctrl[2] = {LIS_FIFO_CTRL , 0}; - spidev_transfer(ax->spi, 0, sizeof(ctrl), ctrl); -} - void command_query_lis2dw(uint32_t *args) { struct lis2dw *ax = oid_lookup(args[0], command_config_lis2dw); - if (!args[2]) { + sched_del_timer(&ax->timer); + ax->flags = 0; + if (!args[1]) // End measurements - lis2dw_stop(ax, args[0]); return; - } + // Start new measurements query - sched_del_timer(&ax->timer); - ax->timer.waketime = args[1]; - ax->rest_ticks = args[2]; - ax->flags = LIS_HAVE_START; + ax->rest_ticks = args[1]; sensor_bulk_reset(&ax->sb); - sched_add_timer(&ax->timer); + lis2dw_reschedule_timer(ax); } -DECL_COMMAND(command_query_lis2dw, - "query_lis2dw oid=%c clock=%u rest_ticks=%u"); +DECL_COMMAND(command_query_lis2dw, "query_lis2dw oid=%c rest_ticks=%u"); void command_query_lis2dw_status(uint32_t *args) @@ -174,11 +147,7 @@ lis2dw_task(void) struct lis2dw *ax; foreach_oid(oid, ax, command_config_lis2dw) { uint_fast8_t flags = ax->flags; - if (!(flags & LIS_PENDING)) - continue; - if (flags & LIS_HAVE_START) - lis2dw_start(ax, oid); - else + if (flags & LIS_PENDING) lis2dw_query(ax, oid); } } From daf875e6e4b8cb461a57623ecac37cf0f1f240e8 Mon Sep 17 00:00:00 2001 From: BIGTREETECH <38851044+bigtreetech@users.noreply.github.com> Date: Mon, 22 Jan 2024 09:23:12 +0800 Subject: [PATCH 49/63] stm32g0: Disable internal pull-down 
resistors on UCPDx CCx pins, because klipper never uses UCPD (#6462) Signed-off-by: Alan.Ma from BigTreeTech --- src/stm32/stm32g0.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/stm32/stm32g0.c b/src/stm32/stm32g0.c index 7408612ad..819a5edd4 100644 --- a/src/stm32/stm32g0.c +++ b/src/stm32/stm32g0.c @@ -162,6 +162,8 @@ bootloader_request(void) void armcm_main(void) { + // Disable internal pull-down resistors on UCPDx CCx pins + SYSCFG->CFGR1 |= (SYSCFG_CFGR1_UCPD1_STROBE | SYSCFG_CFGR1_UCPD2_STROBE); SCB->VTOR = (uint32_t)VectorTable; // Reset clock registers (in case bootloader has changed them) From f653db9c88aea646f506591ad2ea9b78eede4aa8 Mon Sep 17 00:00:00 2001 From: Jakub Date: Tue, 23 Jan 2024 00:55:34 +0100 Subject: [PATCH 50/63] stm32: Add 36KiB bootloader offset option (#6449) - This offset is used by Anycubic Kobra 2 Neo bootloader Signed-off-by: Jakub Przystasz --- src/stm32/Kconfig | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/stm32/Kconfig b/src/stm32/Kconfig index c06bb6ffb..f8523f492 100644 --- a/src/stm32/Kconfig +++ b/src/stm32/Kconfig @@ -285,6 +285,8 @@ choice bool "34KiB bootloader (Chitu v6 Bootloader)" if MACH_STM32F103 config STM32_FLASH_START_20200 bool "128KiB bootloader with 512 byte offset (Prusa Buddy)" if MACH_STM32F4x5 + config STM32_FLASH_START_9000 + bool "36KiB bootloader (Anycubic Kobra 2 Neo)" if MACH_STM32F1 config STM32_FLASH_START_C000 bool "48KiB bootloader (MKS Robin Nano V3)" if MACH_STM32F4x5 config STM32_FLASH_START_10000 @@ -312,6 +314,7 @@ config FLASH_APPLICATION_ADDRESS default 0x8007000 if STM32_FLASH_START_7000 default 0x8008000 if STM32_FLASH_START_8000 default 0x8008800 if STM32_FLASH_START_8800 + default 0x8009000 if STM32_FLASH_START_9000 default 0x800C000 if STM32_FLASH_START_C000 default 0x8010000 if STM32_FLASH_START_10000 default 0x8020000 if STM32_FLASH_START_20000 From 2e8b54ae5f01e2c4897eec109cd8a0cb3c0e5c4a Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Mon, 22 Jan 2024 18:58:41 -0500 Subject: [PATCH 51/63] stm32: Remove product names from bootloader choices menu Signed-off-by: Kevin O'Connor --- src/stm32/Kconfig | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/stm32/Kconfig b/src/stm32/Kconfig index f8523f492..2ae90bee8 100644 --- a/src/stm32/Kconfig +++ b/src/stm32/Kconfig @@ -282,24 +282,24 @@ choice config STM32_FLASH_START_8000 bool "32KiB bootloader" if MACH_STM32F1 || MACH_STM32F2 || MACH_STM32F4 || MACH_STM32F7 config STM32_FLASH_START_8800 - bool "34KiB bootloader (Chitu v6 Bootloader)" if MACH_STM32F103 + bool "34KiB bootloader" if MACH_STM32F103 config STM32_FLASH_START_20200 - bool "128KiB bootloader with 512 byte offset (Prusa Buddy)" if MACH_STM32F4x5 + bool "128KiB bootloader with 512 byte offset" if MACH_STM32F4x5 config STM32_FLASH_START_9000 - bool "36KiB bootloader (Anycubic Kobra 2 Neo)" if MACH_STM32F1 + bool "36KiB bootloader" if MACH_STM32F1 config STM32_FLASH_START_C000 - bool "48KiB bootloader (MKS Robin Nano V3)" if MACH_STM32F4x5 + bool "48KiB bootloader" if MACH_STM32F4x5 config STM32_FLASH_START_10000 bool "64KiB bootloader" if MACH_STM32F103 || MACH_STM32F4 config STM32_FLASH_START_800 - bool "2KiB bootloader (HID Bootloader)" if MACH_STM32F103 + bool "2KiB bootloader" if MACH_STM32F103 config STM32_FLASH_START_1000 bool "4KiB bootloader" if MACH_STM32F1 || MACH_STM32F0 config STM32_FLASH_START_4000 - bool "16KiB bootloader (HID Bootloader)" if MACH_STM32F207 || MACH_STM32F401 || MACH_STM32F4x5 || MACH_STM32F103 || MACH_STM32F072 + 
bool "16KiB bootloader" if MACH_STM32F207 || MACH_STM32F401 || MACH_STM32F4x5 || MACH_STM32F103 || MACH_STM32F072 config STM32_FLASH_START_20000 - bool "128KiB bootloader (SKR SE BX v2.0)" if MACH_STM32H743 || MACH_STM32H723 || MACH_STM32F7 + bool "128KiB bootloader" if MACH_STM32H743 || MACH_STM32H723 || MACH_STM32F7 config STM32_FLASH_START_0000 bool "No bootloader" From 4115ea128af3308c1a5af224fce83b12c2e97e1a Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Fri, 13 Jan 2023 10:02:47 -0500 Subject: [PATCH 52/63] output_pin: Deprecate static_value parameter Remove support for configuring "static" pins in output_pin module. A "static" pin only saves a few bytes of memory in the micro-controller. The savings does not justify the increased code complexity. Deprecate the static_value parameter to warn users. In the interim, a static_value parameter will set both value and shutdown_value parameters. Signed-off-by: Kevin O'Connor --- config/generic-mini-rambo.cfg | 6 +-- config/generic-ultimaker-ultimainboard-v2.cfg | 8 ++-- config/printer-adimlab-2018.cfg | 6 +-- config/printer-creality-cr30-2021.cfg | 1 - config/printer-lulzbot-mini1-2016.cfg | 6 +-- config/printer-wanhao-duplicator-6-2016.cfg | 6 +-- docs/Config_Changes.md | 4 ++ docs/Config_Reference.md | 7 +--- klippy/extras/output_pin.py | 39 ++++++++++--------- 9 files changed, 42 insertions(+), 41 deletions(-) diff --git a/config/generic-mini-rambo.cfg b/config/generic-mini-rambo.cfg index 61e2ac847..1a616cf80 100644 --- a/config/generic-mini-rambo.cfg +++ b/config/generic-mini-rambo.cfg @@ -84,7 +84,7 @@ pwm: True scale: 2.0 cycle_time: .000030 hardware_pwm: True -static_value: 1.3 +value: 1.3 [output_pin stepper_z_current] pin: PL4 @@ -92,7 +92,7 @@ pwm: True scale: 2.0 cycle_time: .000030 hardware_pwm: True -static_value: 1.3 +value: 1.3 [output_pin stepper_e_current] pin: PL5 @@ -100,7 +100,7 @@ pwm: True scale: 2.0 cycle_time: .000030 hardware_pwm: True -static_value: 1.25 +value: 1.25 [static_digital_output stepper_config] pins: diff --git a/config/generic-ultimaker-ultimainboard-v2.cfg b/config/generic-ultimaker-ultimainboard-v2.cfg index 9a4d4e6da..b1ce3fa55 100644 --- a/config/generic-ultimaker-ultimainboard-v2.cfg +++ b/config/generic-ultimaker-ultimainboard-v2.cfg @@ -97,7 +97,7 @@ max_z_accel: 30 [output_pin case_light] pin: PH5 -static_value: 1.0 +value: 1.0 # Motor current settings. [output_pin stepper_xy_current] @@ -107,7 +107,7 @@ scale: 2.000 # Max power setting. cycle_time: .000030 hardware_pwm: True -static_value: 1.200 +value: 1.200 # Power adjustment setting. 
[output_pin stepper_z_current] @@ -116,7 +116,7 @@ pwm: True scale: 2.000 cycle_time: .000030 hardware_pwm: True -static_value: 1.200 +value: 1.200 [output_pin stepper_e_current] pin: PL3 @@ -124,4 +124,4 @@ pwm: True scale: 2.000 cycle_time: .000030 hardware_pwm: True -static_value: 1.250 +value: 1.250 diff --git a/config/printer-adimlab-2018.cfg b/config/printer-adimlab-2018.cfg index 2f02173dd..d810e9d7e 100644 --- a/config/printer-adimlab-2018.cfg +++ b/config/printer-adimlab-2018.cfg @@ -89,7 +89,7 @@ pwm: True scale: 2.0 cycle_time: .000030 hardware_pwm: True -static_value: 1.3 +value: 1.3 [output_pin stepper_z_current] pin: PL4 @@ -97,7 +97,7 @@ pwm: True scale: 2.0 cycle_time: .000030 hardware_pwm: True -static_value: 1.3 +value: 1.3 [output_pin stepper_e_current] pin: PL3 @@ -105,7 +105,7 @@ pwm: True scale: 2.0 cycle_time: .000030 hardware_pwm: True -static_value: 1.25 +value: 1.25 [display] lcd_type: st7920 diff --git a/config/printer-creality-cr30-2021.cfg b/config/printer-creality-cr30-2021.cfg index de9469200..1edc75313 100644 --- a/config/printer-creality-cr30-2021.cfg +++ b/config/printer-creality-cr30-2021.cfg @@ -98,7 +98,6 @@ max_temp: 100 [output_pin led] pin: PC14 -static_value: 0 # Neopixel LED support # [neopixel led_neopixel] diff --git a/config/printer-lulzbot-mini1-2016.cfg b/config/printer-lulzbot-mini1-2016.cfg index 9be60cbdd..52b8061ee 100644 --- a/config/printer-lulzbot-mini1-2016.cfg +++ b/config/printer-lulzbot-mini1-2016.cfg @@ -125,7 +125,7 @@ pwm: True scale: 2.0 cycle_time: .000030 hardware_pwm: True -static_value: 1.300 +value: 1.300 [output_pin stepper_z_current] pin: PL4 @@ -133,7 +133,7 @@ pwm: True scale: 2.0 cycle_time: .000030 hardware_pwm: True -static_value: 1.630 +value: 1.630 [output_pin stepper_e_current] pin: PL5 @@ -141,7 +141,7 @@ pwm: True scale: 2.0 cycle_time: .000030 hardware_pwm: True -static_value: 1.250 +value: 1.250 [static_digital_output stepper_config] # Microstepping pins diff --git a/config/printer-wanhao-duplicator-6-2016.cfg b/config/printer-wanhao-duplicator-6-2016.cfg index b1d35faec..de8a3de87 100644 --- a/config/printer-wanhao-duplicator-6-2016.cfg +++ b/config/printer-wanhao-duplicator-6-2016.cfg @@ -86,7 +86,7 @@ pwm: True scale: 2.782 cycle_time: .000030 hardware_pwm: True -static_value: 1.2 +value: 1.2 [output_pin stepper_z_current] pin: PL4 @@ -94,7 +94,7 @@ pwm: True scale: 2.782 cycle_time: .000030 hardware_pwm: True -static_value: 1.2 +value: 1.2 [output_pin stepper_e_current] pin: PL3 @@ -102,7 +102,7 @@ pwm: True scale: 2.782 cycle_time: .000030 hardware_pwm: True -static_value: 1.0 +value: 1.0 [display] lcd_type: ssd1306 diff --git a/docs/Config_Changes.md b/docs/Config_Changes.md index 2ceb868db..bd00e3d7e 100644 --- a/docs/Config_Changes.md +++ b/docs/Config_Changes.md @@ -8,6 +8,10 @@ All dates in this document are approximate. ## Changes +20240123: The output_pin `static_value` parameter is deprecated. +Replace with `value` and `shutdown_value` parameters. The option will +be removed in the near future. + 20231216: The `[hall_filament_width_sensor]` is changed to trigger filament runout when the thickness of the filament exceeds `max_diameter`. The maximum diameter defaults to `default_nominal_filament_diameter + max_difference`. 
See diff --git a/docs/Config_Reference.md b/docs/Config_Reference.md index 4b53ef264..a6964e5bd 100644 --- a/docs/Config_Reference.md +++ b/docs/Config_Reference.md @@ -3096,11 +3096,6 @@ pin: # If this is true, the value fields should be between 0 and 1; if it # is false the value fields should be either 0 or 1. The default is # False. -#static_value: -# If this is set, then the pin is assigned to this value at startup -# and the pin can not be changed during runtime. A static pin uses -# slightly less ram in the micro-controller. The default is to use -# runtime configuration of pins. #value: # The value to initially set the pin to during MCU configuration. # The default is 0 (for low voltage). @@ -3133,6 +3128,8 @@ pin: # then the 'value' parameter can be specified using the desired # amperage for the stepper. The default is to not scale the 'value' # parameter. +#static_value: +# This option is deprecated and should no longer be specified. ``` ### [pwm_tool] diff --git a/klippy/extras/output_pin.py b/klippy/extras/output_pin.py index 8b41aca7a..7b78775be 100644 --- a/klippy/extras/output_pin.py +++ b/klippy/extras/output_pin.py @@ -12,6 +12,7 @@ class PrinterOutputPin: def __init__(self, config): self.printer = config.get_printer() ppins = self.printer.lookup_object('pins') + # Determine pin type self.is_pwm = config.getboolean('pwm', False) if self.is_pwm: self.mcu_pin = ppins.setup_pin('pwm', config.get('pin')) @@ -26,34 +27,34 @@ def __init__(self, config): self.scale = 1. self.last_cycle_time = self.default_cycle_time = 0. self.last_print_time = 0. - static_value = config.getfloat('static_value', None, - minval=0., maxval=self.scale) + # Support mcu checking for maximum duration self.reactor = self.printer.get_reactor() self.resend_timer = None self.resend_interval = 0. + max_mcu_duration = config.getfloat('maximum_mcu_duration', 0., + minval=0.500, + maxval=MAX_SCHEDULE_TIME) + self.mcu_pin.setup_max_duration(max_mcu_duration) + if max_mcu_duration: + self.resend_interval = max_mcu_duration - RESEND_HOST_TIME + # Determine start and shutdown values + static_value = config.getfloat('static_value', None, + minval=0., maxval=self.scale) if static_value is not None: - self.mcu_pin.setup_max_duration(0.) 
- self.last_value = static_value / self.scale - self.mcu_pin.setup_start_value( - self.last_value, self.last_value, True) + config.deprecate('static_value') + self.last_value = self.shutdown_value = static_value / self.scale else: - max_mcu_duration = config.getfloat('maximum_mcu_duration', 0., - minval=0.500, - maxval=MAX_SCHEDULE_TIME) - self.mcu_pin.setup_max_duration(max_mcu_duration) - if max_mcu_duration: - self.resend_interval = max_mcu_duration - RESEND_HOST_TIME - self.last_value = config.getfloat( 'value', 0., minval=0., maxval=self.scale) / self.scale self.shutdown_value = config.getfloat( 'shutdown_value', 0., minval=0., maxval=self.scale) / self.scale - self.mcu_pin.setup_start_value(self.last_value, self.shutdown_value) - pin_name = config.get_name().split()[1] - gcode = self.printer.lookup_object('gcode') - gcode.register_mux_command("SET_PIN", "PIN", pin_name, - self.cmd_SET_PIN, - desc=self.cmd_SET_PIN_help) + self.mcu_pin.setup_start_value(self.last_value, self.shutdown_value) + # Register commands + pin_name = config.get_name().split()[1] + gcode = self.printer.lookup_object('gcode') + gcode.register_mux_command("SET_PIN", "PIN", pin_name, + self.cmd_SET_PIN, + desc=self.cmd_SET_PIN_help) def get_status(self, eventtime): return {'value': self.last_value} def _set_pin(self, print_time, value, cycle_time, is_resend=False): From 7abafb575ba7b098b9f1025a1d65717568a89876 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Fri, 13 Jan 2023 10:18:18 -0500 Subject: [PATCH 53/63] mcu: Remove support for "static" pins Update static_digital_output.py to directly configure static digital pins. There are no other users of "static" pins, so remove that support from mcu.py, replicape.py, and sx1509.py. This simplifies the low-level pin handling code. Signed-off-by: Kevin O'Connor --- klippy/extras/replicape.py | 13 +------------ klippy/extras/static_digital_output.py | 6 ++++-- klippy/extras/sx1509.py | 12 ++---------- klippy/mcu.py | 26 ++------------------------ 4 files changed, 9 insertions(+), 48 deletions(-) diff --git a/klippy/extras/replicape.py b/klippy/extras/replicape.py index 4c3762974..d6737a359 100644 --- a/klippy/extras/replicape.py +++ b/klippy/extras/replicape.py @@ -30,7 +30,6 @@ def __init__(self, replicape, channel, pin_type, pin_params): self._invert = pin_params['invert'] self._start_value = self._shutdown_value = float(self._invert) self._is_enable = not not self._start_value - self._is_static = False self._last_clock = 0 self._pwm_max = 0. self._set_cmd = None @@ -44,28 +43,18 @@ def setup_cycle_time(self, cycle_time, hardware_pwm=False): if cycle_time != self._cycle_time: logging.info("Ignoring pca9685 cycle time of %.6f (using %.6f)", cycle_time, self._cycle_time) - def setup_start_value(self, start_value, shutdown_value, is_static=False): - if is_static and start_value != shutdown_value: - raise pins.error("Static pin can not have shutdown value") + def setup_start_value(self, start_value, shutdown_value): if self._invert: start_value = 1. - start_value shutdown_value = 1. 
- shutdown_value self._start_value = max(0., min(1., start_value)) self._shutdown_value = max(0., min(1., shutdown_value)) - self._is_static = is_static self._replicape.note_pwm_start_value( self._channel, self._start_value, self._shutdown_value) self._is_enable = not not self._start_value def _build_config(self): self._pwm_max = self._mcu.get_constant_float("PCA9685_MAX") cycle_ticks = self._mcu.seconds_to_clock(self._cycle_time) - if self._is_static: - self._mcu.add_config_cmd( - "set_pca9685_out bus=%d addr=%d channel=%d" - " cycle_ticks=%d value=%d" % ( - self._bus, self._address, self._channel, - cycle_ticks, self._start_value * self._pwm_max)) - return self._mcu.request_move_queue_slot() self._oid = self._mcu.create_oid() self._mcu.add_config_cmd( diff --git a/klippy/extras/static_digital_output.py b/klippy/extras/static_digital_output.py index ce2093720..2fa0bb3f5 100644 --- a/klippy/extras/static_digital_output.py +++ b/klippy/extras/static_digital_output.py @@ -10,8 +10,10 @@ def __init__(self, config): ppins = printer.lookup_object('pins') pin_list = config.getlist('pins') for pin_desc in pin_list: - mcu_pin = ppins.setup_pin('digital_out', pin_desc) - mcu_pin.setup_start_value(1, 1, True) + pin_params = ppins.lookup_pin(pin_desc, can_invert=True) + mcu = pin_params['chip'] + mcu.add_config_cmd("set_digital_out pin=%s value=%d" + % (pin_params['pin'], not pin_params['invert'])) def load_config_prefix(config): return PrinterStaticDigitalOut(config) diff --git a/klippy/extras/sx1509.py b/klippy/extras/sx1509.py index 8b19dda80..51080fe24 100644 --- a/klippy/extras/sx1509.py +++ b/klippy/extras/sx1509.py @@ -104,7 +104,6 @@ def __init__(self, sx1509, pin_params): self._invert = pin_params['invert'] self._mcu.register_config_callback(self._build_config) self._start_value = self._shutdown_value = self._invert - self._is_static = False self._max_duration = 2. self._set_cmd = self._clear_cmd = None # Set direction to output @@ -116,12 +115,9 @@ def get_mcu(self): return self._mcu def setup_max_duration(self, max_duration): self._max_duration = max_duration - def setup_start_value(self, start_value, shutdown_value, is_static=False): - if is_static and start_value != shutdown_value: - raise pins.error("Static pin can not have shutdown value") + def setup_start_value(self, start_value, shutdown_value): self._start_value = (not not start_value) ^ self._invert self._shutdown_value = self._invert - self._is_static = is_static # We need to set the start value here so the register is # updated before the SX1509 class writes it. if self._start_value: @@ -148,7 +144,6 @@ def __init__(self, sx1509, pin_params): self._invert = pin_params['invert'] self._mcu.register_config_callback(self._build_config) self._start_value = self._shutdown_value = float(self._invert) - self._is_static = False self._max_duration = 2. self._hardware_pwm = False self._pwm_max = 0. @@ -182,15 +177,12 @@ def setup_max_duration(self, max_duration): def setup_cycle_time(self, cycle_time, hardware_pwm=False): self._cycle_time = cycle_time self._hardware_pwm = hardware_pwm - def setup_start_value(self, start_value, shutdown_value, is_static=False): - if is_static and start_value != shutdown_value: - raise pins.error("Static pin can not have shutdown value") + def setup_start_value(self, start_value, shutdown_value): if self._invert: start_value = 1. - start_value shutdown_value = 1. 
- shutdown_value self._start_value = max(0., min(1., start_value)) self._shutdown_value = max(0., min(1., shutdown_value)) - self._is_static = is_static def set_pwm(self, print_time, value, cycle_time=None): self._sx1509.set_register(self._i_on_reg, ~int(255 * value) if not self._invert diff --git a/klippy/mcu.py b/klippy/mcu.py index cfc389e76..7f784de7f 100644 --- a/klippy/mcu.py +++ b/klippy/mcu.py @@ -335,7 +335,6 @@ def __init__(self, mcu, pin_params): self._pin = pin_params['pin'] self._invert = pin_params['invert'] self._start_value = self._shutdown_value = self._invert - self._is_static = False self._max_duration = 2. self._last_clock = 0 self._set_cmd = None @@ -343,17 +342,10 @@ def get_mcu(self): return self._mcu def setup_max_duration(self, max_duration): self._max_duration = max_duration - def setup_start_value(self, start_value, shutdown_value, is_static=False): - if is_static and start_value != shutdown_value: - raise pins.error("Static pin can not have shutdown value") + def setup_start_value(self, start_value, shutdown_value): self._start_value = (not not start_value) ^ self._invert self._shutdown_value = (not not shutdown_value) ^ self._invert - self._is_static = is_static def _build_config(self): - if self._is_static: - self._mcu.add_config_cmd("set_digital_out pin=%s value=%d" - % (self._pin, self._start_value)) - return if self._max_duration and self._start_value != self._shutdown_value: raise pins.error("Pin with max duration must have start" " value equal to shutdown value") @@ -389,7 +381,6 @@ def __init__(self, mcu, pin_params): self._pin = pin_params['pin'] self._invert = pin_params['invert'] self._start_value = self._shutdown_value = float(self._invert) - self._is_static = False self._last_clock = self._last_cycle_ticks = 0 self._pwm_max = 0. self._set_cmd = self._set_cycle_ticks = None @@ -400,15 +391,12 @@ def setup_max_duration(self, max_duration): def setup_cycle_time(self, cycle_time, hardware_pwm=False): self._cycle_time = cycle_time self._hardware_pwm = hardware_pwm - def setup_start_value(self, start_value, shutdown_value, is_static=False): - if is_static and start_value != shutdown_value: - raise pins.error("Static pin can not have shutdown value") + def setup_start_value(self, start_value, shutdown_value): if self._invert: start_value = 1. - start_value shutdown_value = 1. 
- shutdown_value self._start_value = max(0., min(1., start_value)) self._shutdown_value = max(0., min(1., shutdown_value)) - self._is_static = is_static def _build_config(self): if self._max_duration and self._start_value != self._shutdown_value: raise pins.error("Pin with max duration must have start" @@ -423,12 +411,6 @@ def _build_config(self): raise pins.error("PWM pin max duration too large") if self._hardware_pwm: self._pwm_max = self._mcu.get_constant_float("PWM_MAX") - if self._is_static: - self._mcu.add_config_cmd( - "set_pwm_out pin=%s cycle_ticks=%d value=%d" - % (self._pin, cycle_ticks, - self._start_value * self._pwm_max)) - return self._mcu.request_move_queue_slot() self._oid = self._mcu.create_oid() self._mcu.add_config_cmd( @@ -447,10 +429,6 @@ def _build_config(self): # Software PWM if self._shutdown_value not in [0., 1.]: raise pins.error("shutdown value must be 0.0 or 1.0 on soft pwm") - if self._is_static: - self._mcu.add_config_cmd("set_digital_out pin=%s value=%d" - % (self._pin, self._start_value >= 0.5)) - return if cycle_ticks >= 1<<31: raise pins.error("PWM pin cycle time too large") self._mcu.request_move_queue_slot() From 1baa45913ffd05a808d5d9ea0ae0161ebbaff247 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Fri, 12 Jan 2024 22:52:32 -0500 Subject: [PATCH 54/63] output_pin: Deprecate the maximum_mcu_duration parameter Advise users to configure a pwm_tool config section if checking for maximum mcu duration is required. Signed-off-by: Kevin O'Connor --- docs/Config_Changes.md | 4 ++++ docs/Config_Reference.md | 18 +++++++++--------- klippy/extras/output_pin.py | 1 + 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/docs/Config_Changes.md b/docs/Config_Changes.md index bd00e3d7e..c660c42aa 100644 --- a/docs/Config_Changes.md +++ b/docs/Config_Changes.md @@ -8,6 +8,10 @@ All dates in this document are approximate. ## Changes +20240123: The output_pin `maximum_mcu_duration` parameter is +deprecated. Use a [pwm_tool config section](Config_Reference.md#pwm_tool) +instead. The option will be removed in the near future. + 20240123: The output_pin `static_value` parameter is deprecated. Replace with `value` and `shutdown_value` parameters. The option will be removed in the near future. diff --git a/docs/Config_Reference.md b/docs/Config_Reference.md index a6964e5bd..226b7a133 100644 --- a/docs/Config_Reference.md +++ b/docs/Config_Reference.md @@ -3102,13 +3102,6 @@ pin: #shutdown_value: # The value to set the pin to on an MCU shutdown event. The default # is 0 (for low voltage). -#maximum_mcu_duration: -# The maximum duration a non-shutdown value may be driven by the MCU -# without an acknowledge from the host. -# If host can not keep up with an update, the MCU will shutdown -# and set all pins to their respective shutdown values. -# Default: 0 (disabled) -# Usual values are around 5 seconds. #cycle_time: 0.100 # The amount of time (in seconds) per PWM cycle. It is recommended # this be 10 milliseconds or greater when using software based PWM. @@ -3128,8 +3121,9 @@ pin: # then the 'value' parameter can be specified using the desired # amperage for the stepper. The default is to not scale the 'value' # parameter. +#maximum_mcu_duration: #static_value: -# This option is deprecated and should no longer be specified. +# These options are deprecated and should no longer be specified. ``` ### [pwm_tool] @@ -3144,9 +3138,15 @@ extended [g-code commands](G-Codes.md#output_pin). [pwm_tool my_tool] pin: # The pin to configure as an output. 
This parameter must be provided. +#maximum_mcu_duration: +# The maximum duration a non-shutdown value may be driven by the MCU +# without an acknowledge from the host. +# If host can not keep up with an update, the MCU will shutdown +# and set all pins to their respective shutdown values. +# Default: 0 (disabled) +# Usual values are around 5 seconds. #value: #shutdown_value: -#maximum_mcu_duration: #cycle_time: 0.100 #hardware_pwm: False #scale: diff --git a/klippy/extras/output_pin.py b/klippy/extras/output_pin.py index 7b78775be..76789c37d 100644 --- a/klippy/extras/output_pin.py +++ b/klippy/extras/output_pin.py @@ -36,6 +36,7 @@ def __init__(self, config): maxval=MAX_SCHEDULE_TIME) self.mcu_pin.setup_max_duration(max_mcu_duration) if max_mcu_duration: + config.deprecate('maximum_mcu_duration') self.resend_interval = max_mcu_duration - RESEND_HOST_TIME # Determine start and shutdown values static_value = config.getfloat('static_value', None, From fd2feff67df65c559cafc8fc5f2fd8601355e81a Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Fri, 13 Jan 2023 11:20:19 -0500 Subject: [PATCH 55/63] pwm_cycle_time: New module for output pins with dynamic cycle times Remove support for changing the cycle time of pwm pins from the output_pin module. Use a new pwm_cycle_time module that supports setting dynamic cycle times. This simplifies the output_pin code and low-level pin update code. Signed-off-by: Kevin O'Connor --- config/sample-macros.cfg | 4 +- docs/Config_Changes.md | 5 ++ docs/Config_Reference.md | 18 +++++ docs/G-Codes.md | 30 +++++--- docs/Status_Reference.md | 7 ++ klippy/extras/output_pin.py | 25 +++---- klippy/extras/pwm_cycle_time.py | 123 ++++++++++++++++++++++++++++++++ test/klippy/pwm.cfg | 6 ++ test/klippy/pwm.test | 24 ++++--- 9 files changed, 204 insertions(+), 38 deletions(-) create mode 100644 klippy/extras/pwm_cycle_time.py diff --git a/config/sample-macros.cfg b/config/sample-macros.cfg index 5132e1c99..f5649d61a 100644 --- a/config/sample-macros.cfg +++ b/config/sample-macros.cfg @@ -61,12 +61,10 @@ gcode: # P is the tone duration, S the tone frequency. # The frequency won't be pitch perfect. -[output_pin BEEPER_pin] +[pwm_cycle_time BEEPER_pin] pin: ar37 # Beeper pin. This parameter must be provided. # ar37 is the default RAMPS/MKS pin. -pwm: True -# A piezo beeper needs a PWM signal, a DC buzzer doesn't. value: 0 # Silent at power on, set to 1 if active low. shutdown_value: 0 diff --git a/docs/Config_Changes.md b/docs/Config_Changes.md index c660c42aa..ae2c5f0a8 100644 --- a/docs/Config_Changes.md +++ b/docs/Config_Changes.md @@ -8,6 +8,11 @@ All dates in this document are approximate. ## Changes +20240123: The output_pin SET_PIN CYCLE_TIME parameter has been +removed. Use the new +[pwm_cycle_time](Config_Reference.md#pwm_cycle_time) module if it is +necessary to dynamically change a pwm pin's cycle time. + 20240123: The output_pin `maximum_mcu_duration` parameter is deprecated. Use a [pwm_tool config section](Config_Reference.md#pwm_tool) instead. The option will be removed in the near future. diff --git a/docs/Config_Reference.md b/docs/Config_Reference.md index 226b7a133..3b2f17709 100644 --- a/docs/Config_Reference.md +++ b/docs/Config_Reference.md @@ -3153,6 +3153,24 @@ pin: # See the "output_pin" section for the definition of these parameters. ``` +### [pwm_cycle_time] + +Run-time configurable output pins with dynamic pwm cycle timing (one +may define any number of sections with an "pwm_cycle_time" prefix). 
+Pins configured here will be setup as output pins and one may modify +them at run-time using "SET_PIN PIN=my_pin VALUE=.1 CYCLE_TIME=0.100" +type extended [g-code commands](G-Codes.md#pwm_cycle_time). + +``` +[pwm_cycle_time my_pin] +pin: +#value: +#shutdown_value: +#cycle_time: 0.100 +#scale: +# See the "output_pin" section for information on these parameters. +``` + ### [static_digital_output] Statically configured digital output pins (one may define any number diff --git a/docs/G-Codes.md b/docs/G-Codes.md index 8c70609f1..92cb76606 100644 --- a/docs/G-Codes.md +++ b/docs/G-Codes.md @@ -839,17 +839,10 @@ The following command is available when an enabled. #### SET_PIN -`SET_PIN PIN=config_name VALUE= [CYCLE_TIME=]`: Set -the pin to the given output `VALUE`. VALUE should be 0 or 1 for -"digital" output pins. For PWM pins, set to a value between 0.0 and -1.0, or between 0.0 and `scale` if a scale is configured in the -output_pin config section. - -Some pins (currently only "soft PWM" pins) support setting an explicit -cycle time using the CYCLE_TIME parameter (specified in seconds). Note -that the CYCLE_TIME parameter is not stored between SET_PIN commands -(any SET_PIN command without an explicit CYCLE_TIME parameter will use -the `cycle_time` specified in the output_pin config section). +`SET_PIN PIN=config_name VALUE=`: Set the pin to the given +output `VALUE`. VALUE should be 0 or 1 for "digital" output pins. For +PWM pins, set to a value between 0.0 and 1.0, or between 0.0 and +`scale` if a scale is configured in the output_pin config section. ### [palette2] @@ -978,6 +971,21 @@ babystepping), and subtract if from the probe's z_offset. This acts to take a frequently used babystepping value, and "make it permanent". Requires a `SAVE_CONFIG` to take effect. +### [pwm_cycle_time] + +The following command is available when a +[pwm_cycle_time config section](Config_Reference.md#pwm_cycle_time) +is enabled. + +#### SET_PIN +`SET_PIN PIN=config_name VALUE= [CYCLE_TIME=]`: +This command works similarly to [output_pin](#output_pin) SET_PIN +commands. The command here supports setting an explicit cycle time +using the CYCLE_TIME parameter (specified in seconds). Note that the +CYCLE_TIME parameter is not stored between SET_PIN commands (any +SET_PIN command without an explicit CYCLE_TIME parameter will use the +`cycle_time` specified in the pwm_cycle_time config section). + ### [query_adc] The query_adc module is automatically loaded. diff --git a/docs/Status_Reference.md b/docs/Status_Reference.md index b64108ae2..055d1dc0e 100644 --- a/docs/Status_Reference.md +++ b/docs/Status_Reference.md @@ -374,6 +374,13 @@ is defined): template expansion, the PROBE (or similar) command must be run prior to the macro containing this reference. +## pwm_cycle_time + +The following information is available in +[pwm_cycle_time some_name](Config_Reference.md#pwm_cycle_time) +objects: +- `value`: The "value" of the pin, as set by a `SET_PIN` command. + ## quad_gantry_level The following information is available in the `quad_gantry_level` object diff --git a/klippy/extras/output_pin.py b/klippy/extras/output_pin.py index 76789c37d..ef094674c 100644 --- a/klippy/extras/output_pin.py +++ b/klippy/extras/output_pin.py @@ -1,6 +1,6 @@ -# Code to configure miscellaneous chips +# PWM and digital output pin handling # -# Copyright (C) 2017-2021 Kevin O'Connor +# Copyright (C) 2017-2024 Kevin O'Connor # # This file may be distributed under the terms of the GNU GPLv3 license. 
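Taken together, the documentation and test additions in this patch amount to usage along these lines (a minimal sketch; the pin comes from the sample-macros change above and the CYCLE_TIME usage follows the updated G-Codes documentation):

    [pwm_cycle_time BEEPER_pin]
    pin: ar37
    value: 0
    shutdown_value: 0

    # Then, at run time, both duty cycle and frequency may be changed:
    SET_PIN PIN=BEEPER_pin VALUE=0.5 CYCLE_TIME=0.001

A plain [output_pin] section now uses its configured cycle_time unchanged; only pins declared with [pwm_cycle_time] accept the CYCLE_TIME parameter in SET_PIN.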
@@ -21,11 +21,9 @@ def __init__(self, config): hardware_pwm = config.getboolean('hardware_pwm', False) self.mcu_pin.setup_cycle_time(cycle_time, hardware_pwm) self.scale = config.getfloat('scale', 1., above=0.) - self.last_cycle_time = self.default_cycle_time = cycle_time else: self.mcu_pin = ppins.setup_pin('digital_out', config.get('pin')) self.scale = 1. - self.last_cycle_time = self.default_cycle_time = 0. self.last_print_time = 0. # Support mcu checking for maximum duration self.reactor = self.printer.get_reactor() @@ -58,32 +56,30 @@ def __init__(self, config): desc=self.cmd_SET_PIN_help) def get_status(self, eventtime): return {'value': self.last_value} - def _set_pin(self, print_time, value, cycle_time, is_resend=False): - if value == self.last_value and cycle_time == self.last_cycle_time: - if not is_resend: - return + def _set_pin(self, print_time, value, is_resend=False): + if value == self.last_value and not is_resend: + return print_time = max(print_time, self.last_print_time + PIN_MIN_TIME) if self.is_pwm: - self.mcu_pin.set_pwm(print_time, value, cycle_time) + self.mcu_pin.set_pwm(print_time, value) else: self.mcu_pin.set_digital(print_time, value) self.last_value = value - self.last_cycle_time = cycle_time self.last_print_time = print_time if self.resend_interval and self.resend_timer is None: self.resend_timer = self.reactor.register_timer( self._resend_current_val, self.reactor.NOW) cmd_SET_PIN_help = "Set the value of an output pin" def cmd_SET_PIN(self, gcmd): + # Read requested value value = gcmd.get_float('VALUE', minval=0., maxval=self.scale) value /= self.scale - cycle_time = gcmd.get_float('CYCLE_TIME', self.default_cycle_time, - above=0., maxval=MAX_SCHEDULE_TIME) if not self.is_pwm and value not in [0., 1.]: raise gcmd.error("Invalid pin value") + # Obtain print_time and apply requested settings toolhead = self.printer.lookup_object('toolhead') toolhead.register_lookahead_callback( - lambda print_time: self._set_pin(print_time, value, cycle_time)) + lambda print_time: self._set_pin(print_time, value)) def _resend_current_val(self, eventtime): if self.last_value == self.shutdown_value: @@ -97,8 +93,7 @@ def _resend_current_val(self, eventtime): if time_diff > 0.: # Reschedule for resend time return systime + time_diff - self._set_pin(print_time + PIN_MIN_TIME, - self.last_value, self.last_cycle_time, True) + self._set_pin(print_time + PIN_MIN_TIME, self.last_value, True) return systime + self.resend_interval def load_config_prefix(config): diff --git a/klippy/extras/pwm_cycle_time.py b/klippy/extras/pwm_cycle_time.py new file mode 100644 index 000000000..cebbec751 --- /dev/null +++ b/klippy/extras/pwm_cycle_time.py @@ -0,0 +1,123 @@ +# Handle pwm output pins with variable frequency +# +# Copyright (C) 2017-2023 Kevin O'Connor +# +# This file may be distributed under the terms of the GNU GPLv3 license. + +PIN_MIN_TIME = 0.100 +MAX_SCHEDULE_TIME = 5.0 + +class MCU_pwm_cycle: + def __init__(self, pin_params, cycle_time, start_value, shutdown_value): + self._mcu = pin_params['chip'] + self._cycle_time = cycle_time + self._oid = None + self._mcu.register_config_callback(self._build_config) + self._pin = pin_params['pin'] + self._invert = pin_params['invert'] + if self._invert: + start_value = 1. - start_value + shutdown_value = 1. 
- shutdown_value + self._start_value = max(0., min(1., start_value)) + self._shutdown_value = max(0., min(1., shutdown_value)) + self._last_clock = self._cycle_ticks = 0 + self._set_cmd = self._set_cycle_ticks = None + def _build_config(self): + cmd_queue = self._mcu.alloc_command_queue() + curtime = self._mcu.get_printer().get_reactor().monotonic() + printtime = self._mcu.estimated_print_time(curtime) + self._last_clock = self._mcu.print_time_to_clock(printtime + 0.200) + cycle_ticks = self._mcu.seconds_to_clock(self._cycle_time) + if self._shutdown_value not in [0., 1.]: + raise self._mcu.get_printer().config_error( + "shutdown value must be 0.0 or 1.0 on soft pwm") + if cycle_ticks >= 1<<31: + raise self._mcu.get_printer().config_error( + "PWM pin cycle time too large") + self._mcu.request_move_queue_slot() + self._oid = self._mcu.create_oid() + self._mcu.add_config_cmd( + "config_digital_out oid=%d pin=%s value=%d" + " default_value=%d max_duration=%d" + % (self._oid, self._pin, self._start_value >= 1.0, + self._shutdown_value >= 0.5, 0)) + self._mcu.add_config_cmd( + "set_digital_out_pwm_cycle oid=%d cycle_ticks=%d" + % (self._oid, cycle_ticks)) + self._cycle_ticks = cycle_ticks + svalue = int(self._start_value * cycle_ticks + 0.5) + self._mcu.add_config_cmd( + "queue_digital_out oid=%d clock=%d on_ticks=%d" + % (self._oid, self._last_clock, svalue), is_init=True) + self._set_cmd = self._mcu.lookup_command( + "queue_digital_out oid=%c clock=%u on_ticks=%u", cq=cmd_queue) + self._set_cycle_ticks = self._mcu.lookup_command( + "set_digital_out_pwm_cycle oid=%c cycle_ticks=%u", cq=cmd_queue) + def set_pwm_cycle(self, print_time, value, cycle_time): + clock = self._mcu.print_time_to_clock(print_time) + minclock = self._last_clock + # Send updated cycle_time if necessary + cycle_ticks = self._mcu.seconds_to_clock(cycle_time) + if cycle_ticks != self._cycle_ticks: + if cycle_ticks >= 1<<31: + raise self._mcu.get_printer().command_error( + "PWM cycle time too large") + self._set_cycle_ticks.send([self._oid, cycle_ticks], + minclock=minclock, reqclock=clock) + self._cycle_ticks = cycle_ticks + # Send pwm update + if self._invert: + value = 1. - value + v = int(max(0., min(1., value)) * float(self._cycle_ticks) + 0.5) + self._set_cmd.send([self._oid, clock, v], + minclock=self._last_clock, reqclock=clock) + self._last_clock = clock + +class PrinterOutputPWMCycle: + def __init__(self, config): + self.printer = config.get_printer() + self.last_print_time = 0. + cycle_time = config.getfloat('cycle_time', 0.100, above=0., + maxval=MAX_SCHEDULE_TIME) + self.last_cycle_time = self.default_cycle_time = cycle_time + # Determine start and shutdown values + self.scale = config.getfloat('scale', 1., above=0.) 
+ self.last_value = config.getfloat( + 'value', 0., minval=0., maxval=self.scale) / self.scale + self.shutdown_value = config.getfloat( + 'shutdown_value', 0., minval=0., maxval=self.scale) / self.scale + # Create pwm pin object + ppins = self.printer.lookup_object('pins') + pin_params = ppins.lookup_pin(config.get('pin'), can_invert=True) + self.mcu_pin = MCU_pwm_cycle(pin_params, cycle_time, + self.last_value, self.shutdown_value) + # Register commands + pin_name = config.get_name().split()[1] + gcode = self.printer.lookup_object('gcode') + gcode.register_mux_command("SET_PIN", "PIN", pin_name, + self.cmd_SET_PIN, + desc=self.cmd_SET_PIN_help) + def get_status(self, eventtime): + return {'value': self.last_value} + def _set_pin(self, print_time, value, cycle_time): + if value == self.last_value and cycle_time == self.last_cycle_time: + return + print_time = max(print_time, self.last_print_time + PIN_MIN_TIME) + self.mcu_pin.set_pwm_cycle(print_time, value, cycle_time) + self.last_value = value + self.last_cycle_time = cycle_time + self.last_print_time = print_time + cmd_SET_PIN_help = "Set the value of an output pin" + def cmd_SET_PIN(self, gcmd): + # Read requested value + value = gcmd.get_float('VALUE', minval=0., maxval=self.scale) + value /= self.scale + cycle_time = gcmd.get_float('CYCLE_TIME', self.default_cycle_time, + above=0., maxval=MAX_SCHEDULE_TIME) + # Obtain print_time and apply requested settings + toolhead = self.printer.lookup_object('toolhead') + toolhead.register_lookahead_callback( + lambda print_time: self._set_pin(print_time, value, cycle_time)) + +def load_config_prefix(config): + return PrinterOutputPWMCycle(config) diff --git a/test/klippy/pwm.cfg b/test/klippy/pwm.cfg index fbda91269..af5b5b10e 100644 --- a/test/klippy/pwm.cfg +++ b/test/klippy/pwm.cfg @@ -5,6 +5,12 @@ value: 0 shutdown_value: 0 cycle_time: 0.01 +[pwm_cycle_time cycle_pwm_pin] +pin: PH7 +value: 0 +shutdown_value: 0 +cycle_time: 0.01 + [output_pin hard_pwm_pin] pin: PH6 pwm: True diff --git a/test/klippy/pwm.test b/test/klippy/pwm.test index 5e74a3e05..fdbf42f2a 100644 --- a/test/klippy/pwm.test +++ b/test/klippy/pwm.test @@ -16,18 +16,24 @@ SET_PIN PIN=soft_pwm_pin VALUE=0 SET_PIN PIN=soft_pwm_pin VALUE=0.5 SET_PIN PIN=soft_pwm_pin VALUE=1 +# Soft PWM with dynamic cycle time +# Test basic on off +SET_PIN PIN=cycle_pwm_pin VALUE=0 +SET_PIN PIN=cycle_pwm_pin VALUE=0.5 +SET_PIN PIN=cycle_pwm_pin VALUE=1 + # Test cycle time -SET_PIN PIN=soft_pwm_pin VALUE=0 CYCLE_TIME=0.1 -SET_PIN PIN=soft_pwm_pin VALUE=1 CYCLE_TIME=0.5 -SET_PIN PIN=soft_pwm_pin VALUE=0.5 CYCLE_TIME=0.001 -SET_PIN PIN=soft_pwm_pin VALUE=0.75 CYCLE_TIME=0.01 -SET_PIN PIN=soft_pwm_pin VALUE=0.5 CYCLE_TIME=1 +SET_PIN PIN=cycle_pwm_pin VALUE=0 CYCLE_TIME=0.1 +SET_PIN PIN=cycle_pwm_pin VALUE=1 CYCLE_TIME=0.5 +SET_PIN PIN=cycle_pwm_pin VALUE=0.5 CYCLE_TIME=0.001 +SET_PIN PIN=cycle_pwm_pin VALUE=0.75 CYCLE_TIME=0.01 +SET_PIN PIN=cycle_pwm_pin VALUE=0.5 CYCLE_TIME=1 # Test duplicate values -SET_PIN PIN=soft_pwm_pin VALUE=0.5 CYCLE_TIME=0.5 -SET_PIN PIN=soft_pwm_pin VALUE=0.5 CYCLE_TIME=0.5 -SET_PIN PIN=soft_pwm_pin VALUE=0.75 CYCLE_TIME=0.5 -SET_PIN PIN=soft_pwm_pin VALUE=0.75 CYCLE_TIME=0.75 +SET_PIN PIN=cycle_pwm_pin VALUE=0.5 CYCLE_TIME=0.5 +SET_PIN PIN=cycle_pwm_pin VALUE=0.5 CYCLE_TIME=0.5 +SET_PIN PIN=cycle_pwm_pin VALUE=0.75 CYCLE_TIME=0.5 +SET_PIN PIN=cycle_pwm_pin VALUE=0.75 CYCLE_TIME=0.75 # PWM tool # Basic test From 43a9685c581a46ea161faa297c0a29f3bcd7194e Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sun, 15 Jan 2023 
17:14:14 -0500 Subject: [PATCH 56/63] mcu: Remove support for set_pwm() cycle_time parameter Signed-off-by: Kevin O'Connor --- klippy/extras/multi_pin.py | 4 ++-- klippy/extras/replicape.py | 2 +- klippy/extras/sx1509.py | 2 +- klippy/mcu.py | 37 +++++++++---------------------------- 4 files changed, 13 insertions(+), 32 deletions(-) diff --git a/klippy/extras/multi_pin.py b/klippy/extras/multi_pin.py index f5177bd97..c834ee077 100644 --- a/klippy/extras/multi_pin.py +++ b/klippy/extras/multi_pin.py @@ -46,9 +46,9 @@ def setup_cycle_time(self, cycle_time, hardware_pwm=False): def set_digital(self, print_time, value): for mcu_pin in self.mcu_pins: mcu_pin.set_digital(print_time, value) - def set_pwm(self, print_time, value, cycle_time=None): + def set_pwm(self, print_time, value): for mcu_pin in self.mcu_pins: - mcu_pin.set_pwm(print_time, value, cycle_time) + mcu_pin.set_pwm(print_time, value) def load_config_prefix(config): return PrinterMultiPin(config) diff --git a/klippy/extras/replicape.py b/klippy/extras/replicape.py index d6737a359..ab501cafc 100644 --- a/klippy/extras/replicape.py +++ b/klippy/extras/replicape.py @@ -67,7 +67,7 @@ def _build_config(self): cmd_queue = self._mcu.alloc_command_queue() self._set_cmd = self._mcu.lookup_command( "queue_pca9685_out oid=%c clock=%u value=%hu", cq=cmd_queue) - def set_pwm(self, print_time, value, cycle_time=None): + def set_pwm(self, print_time, value): clock = self._mcu.print_time_to_clock(print_time) if self._invert: value = 1. - value diff --git a/klippy/extras/sx1509.py b/klippy/extras/sx1509.py index 51080fe24..3bfecb783 100644 --- a/klippy/extras/sx1509.py +++ b/klippy/extras/sx1509.py @@ -183,7 +183,7 @@ def setup_start_value(self, start_value, shutdown_value): shutdown_value = 1. - shutdown_value self._start_value = max(0., min(1., start_value)) self._shutdown_value = max(0., min(1., shutdown_value)) - def set_pwm(self, print_time, value, cycle_time=None): + def set_pwm(self, print_time, value): self._sx1509.set_register(self._i_on_reg, ~int(255 * value) if not self._invert else int(255 * value) & 0xFF) diff --git a/klippy/mcu.py b/klippy/mcu.py index 7f784de7f..f9b547c94 100644 --- a/klippy/mcu.py +++ b/klippy/mcu.py @@ -381,9 +381,9 @@ def __init__(self, mcu, pin_params): self._pin = pin_params['pin'] self._invert = pin_params['invert'] self._start_value = self._shutdown_value = float(self._invert) - self._last_clock = self._last_cycle_ticks = 0 + self._last_clock = 0 self._pwm_max = 0. - self._set_cmd = self._set_cycle_ticks = None + self._set_cmd = None def get_mcu(self): return self._mcu def setup_max_duration(self, max_duration): @@ -441,40 +441,21 @@ def _build_config(self): self._mcu.add_config_cmd( "set_digital_out_pwm_cycle oid=%d cycle_ticks=%d" % (self._oid, cycle_ticks)) - self._last_cycle_ticks = cycle_ticks + self._pwm_max = float(cycle_ticks) svalue = int(self._start_value * cycle_ticks + 0.5) self._mcu.add_config_cmd( "queue_digital_out oid=%d clock=%d on_ticks=%d" % (self._oid, self._last_clock, svalue), is_init=True) self._set_cmd = self._mcu.lookup_command( "queue_digital_out oid=%c clock=%u on_ticks=%u", cq=cmd_queue) - self._set_cycle_ticks = self._mcu.lookup_command( - "set_digital_out_pwm_cycle oid=%c cycle_ticks=%u", cq=cmd_queue) - def set_pwm(self, print_time, value, cycle_time=None): - clock = self._mcu.print_time_to_clock(print_time) - minclock = self._last_clock - self._last_clock = clock + def set_pwm(self, print_time, value): if self._invert: value = 1. 
- value - if self._hardware_pwm: - v = int(max(0., min(1., value)) * self._pwm_max + 0.5) - self._set_cmd.send([self._oid, clock, v], - minclock=minclock, reqclock=clock) - return - # Soft pwm update - if cycle_time is None: - cycle_time = self._cycle_time - cycle_ticks = self._mcu.seconds_to_clock(cycle_time) - if cycle_ticks != self._last_cycle_ticks: - if cycle_ticks >= 1<<31: - raise self._mcu.get_printer().command_error( - "PWM cycle time too large") - self._set_cycle_ticks.send([self._oid, cycle_ticks], - minclock=minclock, reqclock=clock) - self._last_cycle_ticks = cycle_ticks - on_ticks = int(max(0., min(1., value)) * float(cycle_ticks) + 0.5) - self._set_cmd.send([self._oid, clock, on_ticks], - minclock=minclock, reqclock=clock) + v = int(max(0., min(1., value)) * self._pwm_max + 0.5) + clock = self._mcu.print_time_to_clock(print_time) + self._set_cmd.send([self._oid, clock, v], + minclock=self._last_clock, reqclock=clock) + self._last_clock = clock class MCU_adc: def __init__(self, mcu, pin_params): From 55e46aa6250382367af30d3a5e005e919d772d32 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sat, 20 Jan 2024 19:30:21 -0500 Subject: [PATCH 57/63] armcm_boot: Avoid invoking functions in reset_handler_stage_two() Avoid calling memset() and memcpy() prior to copying the ram and clearing the bss. Also, place both ResetHandler() and reset_handler_stage_two() in an explicit ".text.armcm_boot" linker section. These changes make it easier to support targets that want to run all code in ram. Signed-off-by: Kevin O'Connor --- src/generic/armcm_boot.c | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/src/generic/armcm_boot.c b/src/generic/armcm_boot.c index f83ca60de..9d2ce0bbf 100644 --- a/src/generic/armcm_boot.c +++ b/src/generic/armcm_boot.c @@ -22,7 +22,31 @@ extern uint32_t _stack_end; * Basic interrupt handlers ****************************************************************/ -static void __noreturn +// Inlined version of memset (to avoid function calls during intial boot code) +static void __always_inline +boot_memset(void *s, int c, size_t n) +{ + volatile uint32_t *p = s; + while (n) { + *p++ = c; + n -= sizeof(*p); + } +} + +// Inlined version of memcpy (to avoid function calls during intial boot code) +static void __always_inline +boot_memcpy(void *dest, const void *src, size_t n) +{ + const uint32_t *s = src; + volatile uint32_t *d = dest; + while (n) { + *d++ = *s++; + n -= sizeof(*d); + } +} + +// Main initialization code (called from ResetHandler below) +static void __noreturn __section(".text.armcm_boot.stage_two") reset_handler_stage_two(void) { int i; @@ -60,10 +84,10 @@ reset_handler_stage_two(void) // Copy global variables from flash to ram uint32_t count = (&_data_end - &_data_start) * 4; - __builtin_memcpy(&_data_start, &_data_flash, count); + boot_memcpy(&_data_start, &_data_flash, count); // Clear the bss segment - __builtin_memset(&_bss_start, 0, (&_bss_end - &_bss_start) * 4); + boot_memset(&_bss_start, 0, (&_bss_end - &_bss_start) * 4); barrier(); @@ -80,7 +104,7 @@ reset_handler_stage_two(void) // Initial code entry point - invoked by the processor after a reset // Reset interrupts and stack to take control from bootloaders -void +void __section(".text.armcm_boot.stage_one") ResetHandler(void) { __disable_irq(); From 23c5b20f5ba6bbb27c3b5404f7b0912dba511eb5 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sat, 20 Jan 2024 19:58:23 -0500 Subject: [PATCH 58/63] rp2040: Always link using rp2040_link.lds.S Use 
the rp2040 specific linker script even when using a bootloader. Signed-off-by: Kevin O'Connor --- src/rp2040/Makefile | 2 +- src/rp2040/rp2040_link.lds.S | 12 ++++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/src/rp2040/Makefile b/src/rp2040/Makefile index 71ed90a0c..641990140 100644 --- a/src/rp2040/Makefile +++ b/src/rp2040/Makefile @@ -55,7 +55,7 @@ $(OUT)klipper.bin: $(OUT)klipper.elf $(Q)$(OBJCOPY) -O binary $< $@ rptarget-$(CONFIG_RP2040_HAVE_BOOTLOADER) := $(OUT)klipper.bin -rplink-$(CONFIG_RP2040_HAVE_BOOTLOADER) := $(OUT)src/generic/armcm_link.ld +rplink-$(CONFIG_RP2040_HAVE_BOOTLOADER) := $(OUT)src/rp2040/rp2040_link.ld # Set klipper.elf linker rules target-y += $(rptarget-y) diff --git a/src/rp2040/rp2040_link.lds.S b/src/rp2040/rp2040_link.lds.S index 43d6115e4..2052cdbd4 100644 --- a/src/rp2040/rp2040_link.lds.S +++ b/src/rp2040/rp2040_link.lds.S @@ -1,6 +1,6 @@ // rp2040 linker script (based on armcm_link.lds.S and customized for stage2) // -// Copyright (C) 2019-2021 Kevin O'Connor +// Copyright (C) 2019-2024 Kevin O'Connor // // This file may be distributed under the terms of the GNU GPLv3 license. @@ -9,9 +9,15 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm") OUTPUT_ARCH(arm) +#if CONFIG_RP2040_HAVE_STAGE2 + #define ROM_ORIGIN 0x10000000 +#else + #define ROM_ORIGIN CONFIG_FLASH_APPLICATION_ADDRESS +#endif + MEMORY { - rom (rx) : ORIGIN = 0x10000000 , LENGTH = CONFIG_FLASH_SIZE + rom (rx) : ORIGIN = ROM_ORIGIN , LENGTH = CONFIG_FLASH_SIZE ram (rwx) : ORIGIN = CONFIG_RAM_START , LENGTH = CONFIG_RAM_SIZE } @@ -19,7 +25,9 @@ SECTIONS { .text : { . = ALIGN(4); +#if CONFIG_RP2040_HAVE_STAGE2 KEEP(*(.boot2)) +#endif _text_vectortable_start = .; KEEP(*(.vector_table)) _text_vectortable_end = .; From 44e79e0c37a440212a1b7f974adbdbe250e91f83 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sat, 20 Jan 2024 19:33:21 -0500 Subject: [PATCH 59/63] rp2040: Run all code from ram Place all normal code into ram. This reduces the chance that rp2040 instruction cache misses could cause subtle timing issues. Signed-off-by: Kevin O'Connor --- src/rp2040/rp2040_link.lds.S | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rp2040/rp2040_link.lds.S b/src/rp2040/rp2040_link.lds.S index 2052cdbd4..fd178847c 100644 --- a/src/rp2040/rp2040_link.lds.S +++ b/src/rp2040/rp2040_link.lds.S @@ -31,8 +31,7 @@ SECTIONS _text_vectortable_start = .; KEEP(*(.vector_table)) _text_vectortable_end = .; - *(.text .text.*) - *(.rodata .rodata*) + *(.text.armcm_boot*) } > rom . = ALIGN(4); @@ -42,7 +41,9 @@ SECTIONS { . = ALIGN(4); _data_start = .; + *(.text .text.*) *(.ramfunc .ramfunc.*); + *(.rodata .rodata*) *(.data .data.*); . = ALIGN(4); _data_end = .; From f1982edcd5e68328a824ae9998e63778b08581e7 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Sat, 20 Jan 2024 20:04:16 -0500 Subject: [PATCH 60/63] rp2040: Load vectortable into ram Load the interrupt vector table into ram at startup. This reduces the chance of a flash cache access causing timing instability. 
Signed-off-by: Kevin O'Connor --- src/rp2040/main.c | 21 +++++++++++++++++++++ src/rp2040/rp2040_link.lds.S | 6 ++++++ 2 files changed, 27 insertions(+) diff --git a/src/rp2040/main.c b/src/rp2040/main.c index 0b144d0bb..e7b64e5f0 100644 --- a/src/rp2040/main.c +++ b/src/rp2040/main.c @@ -16,6 +16,26 @@ #include "sched.h" // sched_main +/**************************************************************** + * Ram IRQ vector table + ****************************************************************/ + +// Copy vector table to ram and activate it +static void +enable_ram_vectortable(void) +{ + // Symbols created by rp2040_link.lds.S linker script + extern uint32_t _ram_vectortable_start, _ram_vectortable_end; + extern uint32_t _text_vectortable_start; + + uint32_t count = (&_ram_vectortable_end - &_ram_vectortable_start) * 4; + __builtin_memcpy(&_ram_vectortable_start, &_text_vectortable_start, count); + barrier(); + + SCB->VTOR = (uint32_t)&_ram_vectortable_start; +} + + /**************************************************************** * Bootloader ****************************************************************/ @@ -145,6 +165,7 @@ clock_setup(void) void armcm_main(void) { + enable_ram_vectortable(); clock_setup(); sched_main(); } diff --git a/src/rp2040/rp2040_link.lds.S b/src/rp2040/rp2040_link.lds.S index fd178847c..9b0264a2b 100644 --- a/src/rp2040/rp2040_link.lds.S +++ b/src/rp2040/rp2040_link.lds.S @@ -37,6 +37,12 @@ SECTIONS . = ALIGN(4); _data_flash = .; + .ram_vectortable (NOLOAD) : { + _ram_vectortable_start = .; + . = . + ( _text_vectortable_end - _text_vectortable_start ) ; + _ram_vectortable_end = .; + } > ram + .data : AT (_data_flash) { . = ALIGN(4); From 5e433fff06148fde3f0046ad7f1121e9af2181d9 Mon Sep 17 00:00:00 2001 From: Kevin O'Connor Date: Mon, 22 Jan 2024 12:55:22 -0500 Subject: [PATCH 61/63] rp2040: Only change SPI settings while peripheral is disabled Make sure to disable/enable the peripheral to ensure the clock polarity is properly set prior to a change in CS. Signed-off-by: Kevin O'Connor --- src/rp2040/spi.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/rp2040/spi.c b/src/rp2040/spi.c index e6aafa005..758d57308 100644 --- a/src/rp2040/spi.c +++ b/src/rp2040/spi.c @@ -89,8 +89,12 @@ void spi_prepare(struct spi_config config) { spi_hw_t *spi = config.spi; + if (spi->cr0 == config.cr0 && spi->cpsr == config.cpsr) + return; + spi->cr1 = 0; spi->cr0 = config.cr0; spi->cpsr = config.cpsr; + spi->cr1 = SPI_SSPCR1_SSE_BITS; } void From 5e3daa6f21d6485e4e757d0df00e01a13c968541 Mon Sep 17 00:00:00 2001 From: voidtrance <30448940+voidtrance@users.noreply.github.com> Date: Fri, 26 Jan 2024 14:50:01 -0800 Subject: [PATCH 62/63] bed_mesh: Implement adaptive bed mesh (#6461) Adaptive bed mesh allows the bed mesh algorithm to probe only the area of the bed that is being used by the current print. It uses [exclude_objects] to get a list of the printed objects and their area on the bed. It, then, modifies the bed mesh parameters so only the area used by the objects is measured. Adaptive bed mesh works on both cartesian and delta kinematics printers. On Delta printers, the algorithm, adjusts the origin point and radius in order to translate the area of the bed being probe. 
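For illustration, the cartesian case described above can be sketched as a small standalone function: take the bounding box of all [exclude_object] polygon points, expand it by the margin, clamp it to the configured mesh bounds, then scale the probe counts by the ratio of adapted to original mesh size. This is only a sketch, not the patch's implementation (that is in the klippy/extras/bed_mesh.py hunk below); the function name, the flat minimum of 3 probes per axis, and the example numbers are assumptions made for the example.

```python
import math

def adapt_mesh(orig_min, orig_max, counts, polygons, margin=0.0):
    # Gather every polygon vertex reported by [exclude_object]
    xs = [p[0] for poly in polygons for p in poly]
    ys = [p[1] for poly in polygons for p in poly]
    # Object bounding box plus margin, clamped to the configured mesh bounds
    new_min = (max(min(xs) - margin, orig_min[0]),
               max(min(ys) - margin, orig_min[1]))
    new_max = (min(max(xs) + margin, orig_max[0]),
               min(max(ys) + margin, orig_max[1]))
    # Scale probe counts by the adapted/original size ratio, rounding up
    ratio = ((new_max[0] - new_min[0]) / (orig_max[0] - orig_min[0]),
             (new_max[1] - new_min[1]) / (orig_max[1] - orig_min[1]))
    new_counts = (max(3, int(math.ceil(counts[0] * ratio[0]))),
                  max(3, int(math.ceil(counts[1] * ratio[1]))))
    return new_min, new_max, new_counts

# One 50x50mm object on a bed normally meshed from (25,25) to (125,125) with 7x7 points
print(adapt_mesh((25., 25.), (125., 125.), (7, 7),
                 [[(50., 50.), (100., 50.), (100., 100.), (50., 100.)]]))
# -> ((50.0, 50.0), (100.0, 100.0), (4, 4))
```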
Signed-off-by: Mitko Haralanov Signed-off-by: Kyle Hansen Signed-off-by: Kevin O'Connor --- docs/Bed_Mesh.md | 60 +++++++++++++- docs/Config_Reference.md | 3 + docs/img/adaptive_bed_mesh.svg | 4 + docs/img/adaptive_bed_mesh_margin.svg | 4 + klippy/extras/bed_mesh.py | 114 +++++++++++++++++++++++++- 5 files changed, 181 insertions(+), 4 deletions(-) create mode 100644 docs/img/adaptive_bed_mesh.svg create mode 100644 docs/img/adaptive_bed_mesh_margin.svg diff --git a/docs/Bed_Mesh.md b/docs/Bed_Mesh.md index d2a417dd4..ada3de29d 100644 --- a/docs/Bed_Mesh.md +++ b/docs/Bed_Mesh.md @@ -370,14 +370,68 @@ are identified in green. ![bedmesh_interpolated](img/bedmesh_faulty_regions.svg) +### Adaptive Meshes + +Adaptive bed meshing is a way to speed up the bed mesh generation by only probing +the area of the bed used by the objects being printed. When used, the method will +automatically adjust the mesh parameters based on the area occupied by the defined +print objects. + +The adapted mesh area will be computed from the area defined by the boundaries of all +the defined print objects so it covers every object, including any margins defined in +the configuration. After the area is computed, the number of probe points will be +scaled down based on the ratio of the default mesh area and the adapted mesh area. To +illustrate this consider the following example: + +For a 150mmx150mm bed with `mesh_min` set to `25,25` and `mesh_max` set to `125,125`, +the default mesh area is a 100mmx100mm square. An adapted mesh area of `50,50` +means a ratio of `0.5x0.5` between the adapted area and default mesh area. + +If the `bed_mesh` configuration specified `probe_count` as `7x7`, the adapted bed +mesh will use 4x4 probe points (7 * 0.5 rounded up). + +![adaptive_bedmesh](img/adaptive_bed_mesh.svg) + +``` +[bed_mesh] +speed: 120 +horizontal_move_z: 5 +mesh_min: 35, 6 +mesh_max: 240, 198 +probe_count: 5, 3 +adaptive_margin: 5 +``` + +- `adaptive_margin` \ + _Default Value: 0_ \ + Margin (in mm) to add around the area of the bed used by the defined objects. The diagram + below shows the adapted bed mesh area with an `adaptive_margin` of 5mm. The adapted mesh + area (area in green) is computed as the used bed area (area in blue) plus the defined margin. + + ![adaptive_bedmesh_margin](img/adaptive_bed_mesh_margin.svg) + +By nature, adaptive bed meshes use the objects defined by the Gcode file being printed. +Therefore, it is expected that each Gcode file will generate a mesh that probes a different +area of the print bed. Therefore, adapted bed meshes should not be re-used. The expectation +is that a new mesh will be generated for each print if adaptive meshing is used. + +It is also important to consider that adaptive bed meshing is best used on machines that can +normally probe the entire bed and achieve a maximum variance less than or equal to 1 layer +height. Machines with mechanical issues that a full bed mesh normally compensates for may +have undesirable results when attempting print moves **outside** of the probed area. If a +full bed mesh has a variance greater than 1 layer height, caution must be taken when using +adaptive bed meshes and attempting print moves outside of the meshed area. 
+ ## Bed Mesh Gcodes ### Calibration `BED_MESH_CALIBRATE PROFILE= METHOD=[manual | automatic] [=] - [=]`\ + [=] [ADAPTIVE=[0|1] [ADAPTIVE_MARGIN=]`\ _Default Profile: default_\ -_Default Method: automatic if a probe is detected, otherwise manual_ +_Default Method: automatic if a probe is detected, otherwise manual_ \ +_Default Adaptive: 0_ \ +_Default Adaptive Margin: 0_ Initiates the probing procedure for Bed Mesh Calibration. @@ -399,6 +453,8 @@ following parameters are available: - `ROUND_PROBE_COUNT` - All beds: - `ALGORITHM` + - `ADAPTIVE` + - `ADAPTIVE_MARGIN` See the configuration documentation above for details on how each parameter applies to the mesh. diff --git a/docs/Config_Reference.md b/docs/Config_Reference.md index 3b2f17709..985408091 100644 --- a/docs/Config_Reference.md +++ b/docs/Config_Reference.md @@ -991,6 +991,9 @@ Visual Examples: # Optional points that define a faulty region. See docs/Bed_Mesh.md # for details on faulty regions. Up to 99 faulty regions may be added. # By default no faulty regions are set. +#adaptive_margin: +# An optional margin (in mm) to be added around the bed area used by +# the defined print objects when generating an adaptive mesh. ``` ### [bed_tilt] diff --git a/docs/img/adaptive_bed_mesh.svg b/docs/img/adaptive_bed_mesh.svg new file mode 100644 index 000000000..954ca0b32 --- /dev/null +++ b/docs/img/adaptive_bed_mesh.svg @@ -0,0 +1,4 @@ + + + +
[adaptive_bed_mesh.svg content omitted — minified SVG diagram of the adapted mesh area; recoverable labels: Origin (0,0); legend: Object Polygon, Used Bed Area]
\ No newline at end of file diff --git a/docs/img/adaptive_bed_mesh_margin.svg b/docs/img/adaptive_bed_mesh_margin.svg new file mode 100644 index 000000000..6c6216d04 --- /dev/null +++ b/docs/img/adaptive_bed_mesh_margin.svg @@ -0,0 +1,4 @@ + + + +
[adaptive_bed_mesh_margin.svg content omitted — minified SVG diagram of the adapted mesh area with margin; recoverable labels: Origin (0,0); legend: Object Polygon, Used Bed Area, Adapted Bed Mesh Area; labeled points: (60,90), (90,75), (90,130), (95,80), (95,120), (125,110), (125,135), (130,140)]
\ No newline at end of file diff --git a/klippy/extras/bed_mesh.py b/klippy/extras/bed_mesh.py index 6c714304f..9e30846f0 100644 --- a/klippy/extras/bed_mesh.py +++ b/klippy/extras/bed_mesh.py @@ -1,6 +1,5 @@ # Mesh Bed Leveling # -# Copyright (C) 2018 Kevin O'Connor # Copyright (C) 2018-2019 Eric Callahan # # This file may be distributed under the terms of the GNU GPLv3 license. @@ -291,6 +290,7 @@ def __init__(self, config, bedmesh): self.orig_config = {'radius': None, 'origin': None} self.radius = self.origin = None self.mesh_min = self.mesh_max = (0., 0.) + self.adaptive_margin = config.getfloat('adaptive_margin', 0.0) self.zero_ref_pos = config.getfloatlist( "zero_reference_position", None, count=2 ) @@ -573,6 +573,113 @@ def _verify_algorithm(self, error): "interpolation. Configured Probe Count: %d, %d" % (self.mesh_config['x_count'], self.mesh_config['y_count'])) params['algo'] = 'lagrange' + def set_adaptive_mesh(self, gcmd): + if not gcmd.get_int('ADAPTIVE', 0): + return False + exclude_objects = self.printer.lookup_object("exclude_object", None) + if exclude_objects is None: + gcmd.respond_info("Exclude objects not enabled. Using full mesh...") + return False + objects = exclude_objects.get_status().get("objects", []) + if not objects: + return False + margin = gcmd.get_float('ADAPTIVE_MARGIN', self.adaptive_margin) + + # List all exclude_object points by axis and iterate over + # all polygon points, and pick the min and max or each axis + list_of_xs = [] + list_of_ys = [] + gcmd.respond_info("Found %s objects" % (len(objects))) + for obj in objects: + for point in obj["polygon"]: + list_of_xs.append(point[0]) + list_of_ys.append(point[1]) + + # Define bounds of adaptive mesh area + mesh_min = [min(list_of_xs), min(list_of_ys)] + mesh_max = [max(list_of_xs), max(list_of_ys)] + adjusted_mesh_min = [x - margin for x in mesh_min] + adjusted_mesh_max = [x + margin for x in mesh_max] + + # Force margin to respect original mesh bounds + adjusted_mesh_min[0] = max(adjusted_mesh_min[0], + self.orig_config["mesh_min"][0]) + adjusted_mesh_min[1] = max(adjusted_mesh_min[1], + self.orig_config["mesh_min"][1]) + adjusted_mesh_max[0] = min(adjusted_mesh_max[0], + self.orig_config["mesh_max"][0]) + adjusted_mesh_max[1] = min(adjusted_mesh_max[1], + self.orig_config["mesh_max"][1]) + + adjusted_mesh_size = (adjusted_mesh_max[0] - adjusted_mesh_min[0], + adjusted_mesh_max[1] - adjusted_mesh_min[1]) + + # Compute a ratio between the adapted and original sizes + ratio = (adjusted_mesh_size[0] / + (self.orig_config["mesh_max"][0] - + self.orig_config["mesh_min"][0]), + adjusted_mesh_size[1] / + (self.orig_config["mesh_max"][1] - + self.orig_config["mesh_min"][1])) + + gcmd.respond_info("Original mesh bounds: (%s,%s)" % + (self.orig_config["mesh_min"], + self.orig_config["mesh_max"])) + gcmd.respond_info("Original probe count: (%s,%s)" % + (self.mesh_config["x_count"], + self.mesh_config["y_count"])) + gcmd.respond_info("Adapted mesh bounds: (%s,%s)" % + (adjusted_mesh_min, adjusted_mesh_max)) + gcmd.respond_info("Ratio: (%s, %s)" % ratio) + + new_x_probe_count = int( + math.ceil(self.mesh_config["x_count"] * ratio[0])) + new_y_probe_count = int( + math.ceil(self.mesh_config["y_count"] * ratio[1])) + + # There is one case, where we may have to adjust the probe counts: + # axis0 < 4 and axis1 > 6 (see _verify_algorithm). 
+ min_num_of_probes = 3 + if max(new_x_probe_count, new_y_probe_count) > 6 and \ + min(new_x_probe_count, new_y_probe_count) < 4: + min_num_of_probes = 4 + + new_x_probe_count = max(min_num_of_probes, new_x_probe_count) + new_y_probe_count = max(min_num_of_probes, new_y_probe_count) + + gcmd.respond_info("Adapted probe count: (%s,%s)" % + (new_x_probe_count, new_y_probe_count)) + + # If the adapted mesh size is too small, adjust it to something + # useful. + adjusted_mesh_size = (max(adjusted_mesh_size[0], new_x_probe_count), + max(adjusted_mesh_size[1], new_y_probe_count)) + + if self.radius is not None: + adapted_radius = math.sqrt((adjusted_mesh_size[0] ** 2) + + (adjusted_mesh_size[1] ** 2)) / 2 + adapted_origin = (adjusted_mesh_min[0] + + (adjusted_mesh_size[0] / 2), + adjusted_mesh_min[1] + + (adjusted_mesh_size[1] / 2)) + to_adapted_origin = math.sqrt(adapted_origin[0]**2 + + adapted_origin[1]**2) + # If the adapted mesh size is smaller than the default/full + # mesh, adjust the parameters. Otherwise, just do the full mesh. + if adapted_radius + to_adapted_origin < self.radius: + self.radius = adapted_radius + self.origin = adapted_origin + self.mesh_min = (-self.radius, -self.radius) + self.mesh_max = (self.radius, self.radius) + self.mesh_config["x_count"] = self.mesh_config["y_count"] = \ + max(new_x_probe_count, new_y_probe_count) + else: + self.mesh_min = adjusted_mesh_min + self.mesh_max = adjusted_mesh_max + self.mesh_config["x_count"] = new_x_probe_count + self.mesh_config["y_count"] = new_y_probe_count + self._profile_name = None + return True def update_config(self, gcmd): # reset default configuration self.radius = self.orig_config['radius'] @@ -616,6 +723,8 @@ def update_config(self, gcmd): self.mesh_config['algo'] = gcmd.get('ALGORITHM').strip().lower() need_cfg_update = True + need_cfg_update |= self.set_adaptive_mesh(gcmd) + if need_cfg_update: self._verify_algorithm(gcmd.error) self._generate_points(gcmd.error) @@ -781,7 +890,8 @@ def probe_finalize(self, offsets, positions): z_mesh.set_zero_reference(*self.zero_ref_pos) self.bedmesh.set_mesh(z_mesh) self.gcode.respond_info("Mesh Bed Leveling Complete") - self.bedmesh.save_profile(self._profile_name) + if self._profile_name is not None: + self.bedmesh.save_profile(self._profile_name) def _dump_points(self, probed_pts, corrected_pts, offsets): # logs generated points with offset applied, points received # from the finalize callback, and the list of corrected points From 600e89ae8c759613a3c6fc2b24d0a62d00e6baf2 Mon Sep 17 00:00:00 2001 From: Kiswich Date: Sat, 27 Jan 2024 22:23:50 +0800 Subject: [PATCH 63/63] virtual_sdcard: fix virtual SD file position count (#6472) Signed-off-by: Zhang Qiwei --- klippy/extras/virtual_sdcard.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/klippy/extras/virtual_sdcard.py b/klippy/extras/virtual_sdcard.py index 31f283ff2..1bb914ab2 100644 --- a/klippy/extras/virtual_sdcard.py +++ b/klippy/extras/virtual_sdcard.py @@ -258,7 +258,7 @@ def work_handler(self, eventtime): # Dispatch command self.cmd_from_sd = True line = lines.pop() - next_file_position = self.file_position + len(line) + 1 + next_file_position = self.file_position + len(line.encode()) + 1 self.next_file_position = next_file_position try: self.gcode.run_script(line)
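For illustration, the one-line change above swaps a character count for a byte count: on Python 3, `len(line)` is the number of characters in the str, while the virtual SD file position should advance by the number of encoded bytes, and the two differ whenever a G-code line contains non-ASCII text. A quick sketch of the difference — the G-code line is a hypothetical example:

```python
# Hypothetical G-code line containing a non-ASCII character (e.g. an M117 message)
line = "M117 Überhang"
print(len(line))           # 13 -- characters in the str (what the code previously counted)
print(len(line.encode()))  # 14 -- UTF-8 bytes (what the patch now adds to file_position)
```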